11use async_trait::async_trait;
2- use cubeclient::models::{V1LoadRequestQuery, V1LoadResult, V1LoadResultAnnotation};
2+ use cubeclient::models::{V1LoadRequestQuery, V1LoadResponse};
33pub use datafusion:: {
44 arrow:: {
55 array:: {
@@ -52,7 +52,7 @@ use datafusion::{
5252 logical_plan:: JoinType ,
5353 scalar:: ScalarValue ,
5454} ;
55- use serde_json::{json, Value};
55+ use serde_json::Value;
5656
5757#[ derive( Debug , Clone , Eq , PartialEq ) ]
5858pub enum MemberField {
@@ -655,28 +655,22 @@ impl ExecutionPlan for CubeScanExecutionPlan {
655655 ) ) ) ;
656656 }
657657
658- let mut response = JsonValueObject :: new (
659- load_data (
660- self . span_id . clone ( ) ,
661- request,
662- self . auth_context . clone ( ) ,
663- self . transport . clone ( ) ,
664- meta. clone ( ) ,
665- self . schema . clone ( ) ,
666- self . options . clone ( ) ,
667- self . wrapped_sql . clone ( ) ,
668- )
669- . await ?
670- . data ,
671- ) ;
672- one_shot_stream. data = Some (
673- transform_response (
674- & mut response,
675- one_shot_stream. schema . clone ( ) ,
676- & one_shot_stream. member_fields ,
677- )
678- . map_err ( |e| DataFusionError :: Execution ( e. message . to_string ( ) ) ) ?,
679- ) ;
658+ let response = load_data (
659+ self . span_id . clone ( ) ,
660+ request,
661+ self . auth_context . clone ( ) ,
662+ self . transport . clone ( ) ,
663+ meta. clone ( ) ,
664+ self . schema . clone ( ) ,
665+ self . member_fields . clone ( ) ,
666+ self . options . clone ( ) ,
667+ self . wrapped_sql . clone ( ) ,
668+ )
669+ . await ?;
670+
671+ // For now execute method executes only one query at a time, so we
672+ // take the first result
673+ one_shot_stream. data = Some ( response. first ( ) . unwrap ( ) . clone ( ) ) ;
680674
681675 Ok ( Box :: pin ( CubeScanStreamRouter :: new (
682676 None ,
@@ -842,9 +836,10 @@ async fn load_data(
842836 transport : Arc < dyn TransportService > ,
843837 meta : LoadRequestMeta ,
844838 schema : SchemaRef ,
839+ member_fields : Vec < MemberField > ,
845840 options : CubeScanOptions ,
846841 sql_query : Option < SqlQuery > ,
847- ) -> ArrowResult < V1LoadResult > {
842+ ) -> ArrowResult < Vec < RecordBatch > > {
848843 let no_members_query = request. measures . as_ref ( ) . map ( |v| v. len ( ) ) . unwrap_or ( 0 ) == 0
849844 && request. dimensions . as_ref ( ) . map ( |v| v. len ( ) ) . unwrap_or ( 0 ) == 0
850845 && request
@@ -862,22 +857,27 @@ async fn load_data(
862857 data. push ( serde_json:: Value :: Null )
863858 }
864859
865- V1LoadResult::new(
866- V1LoadResultAnnotation {
867- measures: json!(Vec::<serde_json::Value>::new()),
868- dimensions: json!(Vec::<serde_json::Value>::new()),
869- segments: json!(Vec::<serde_json::Value>::new()),
870- time_dimensions: json!(Vec::<serde_json::Value>::new()),
871- },
872- data,
873- )
860+ let mut response = JsonValueObject::new(data);
861+ let rec = transform_response(&mut response, schema.clone(), &member_fields)
862+ .map_err(|e| DataFusionError::Execution(e.message.to_string()))?;
863+
864+ rec
874865 } else {
875866 let result = transport
876- . load ( span_id, request, sql_query, auth_context, meta, schema)
877- . await ;
878- let mut response = result. map_err ( |err| ArrowError :: ComputeError ( err. to_string ( ) ) ) ?;
879- if let Some ( data) = response. results . pop ( ) {
880- match ( options. max_records , data. data . len ( ) ) {
867+ . load (
868+ span_id,
869+ request,
870+ sql_query,
871+ auth_context,
872+ meta,
873+ schema,
874+ member_fields,
875+ )
876+ . await
877+ . map_err ( |err| ArrowError :: ComputeError ( err. to_string ( ) ) ) ?;
878+ let response = result. first ( ) ;
879+ if let Some ( data) = response. cloned ( ) {
880+ match ( options. max_records , data. num_rows ( ) ) {
881881 ( Some ( max_records) , len) if len >= max_records => {
882882 return Err ( ArrowError :: ComputeError ( format ! ( "One of the Cube queries exceeded the maximum row limit ({}). JOIN/UNION is not possible as it will produce incorrect results. Try filtering the results more precisely or moving post-processing functions to an outer query." , max_records) ) ) ;
883883 }
@@ -892,7 +892,7 @@ async fn load_data(
892892 }
893893 } ;
894894
895- Ok ( result)
895+ Ok ( vec ! [ result] )
896896}
897897
898898fn load_to_stream_sync ( one_shot_stream : & mut CubeScanOneShotStream ) -> Result < ( ) > {
@@ -902,6 +902,7 @@ fn load_to_stream_sync(one_shot_stream: &mut CubeScanOneShotStream) -> Result<()
902902 let transport = one_shot_stream. transport . clone ( ) ;
903903 let meta = one_shot_stream. meta . clone ( ) ;
904904 let schema = one_shot_stream. schema . clone ( ) ;
905+ let member_fields = one_shot_stream. member_fields . clone ( ) ;
905906 let options = one_shot_stream. options . clone ( ) ;
906907 let wrapped_sql = one_shot_stream. wrapped_sql . clone ( ) ;
907908
@@ -914,22 +915,16 @@ fn load_to_stream_sync(one_shot_stream: &mut CubeScanOneShotStream) -> Result<()
914915 transport,
915916 meta,
916917 schema,
918+ member_fields,
917919 options,
918920 wrapped_sql,
919921 ) )
920922 } )
921923 . join ( )
922- .map_err(|_| DataFusionError::Execution(format!("Can't load to stream")))?;
923-
924- let mut response = JsonValueObject :: new ( res. unwrap ( ) . data ) ;
925- one_shot_stream. data = Some (
926- transform_response (
927- & mut response,
928- one_shot_stream. schema . clone ( ) ,
929- & one_shot_stream. member_fields ,
930- )
931- . map_err ( |e| DataFusionError :: Execution ( e. message . to_string ( ) ) ) ?,
932- ) ;
924+ .map_err(|_| DataFusionError::Execution(format!("Can't load to stream")))??;
925+
926+ let response = res. first ( ) ;
927+ one_shot_stream. data = Some ( response. cloned ( ) . unwrap ( ) ) ;
933928
934929 Ok ( ( ) )
935930}
@@ -1335,6 +1330,21 @@ pub fn transform_response<V: ValueObject>(
13351330 Ok ( RecordBatch :: try_new ( schema. clone ( ) , columns) ?)
13361331}
13371332
1333+ pub fn convert_transport_response(
1334+ response: V1LoadResponse,
1335+ schema: SchemaRef,
1336+ member_fields: Vec<MemberField>,
1337+ ) -> std::result::Result<Vec<RecordBatch>, CubeError> {
1338+ response
1339+ .results
1340+ .into_iter()
1341+ .map(|r| {
1342+ let mut response = JsonValueObject::new(r.data.clone());
1343+ transform_response(&mut response, schema.clone(), &member_fields)
1344+ })
1345+ .collect::<std::result::Result<Vec<RecordBatch>, CubeError>>()
1346+ }
1347+
13381348#[ cfg( test) ]
13391349mod tests {
13401350 use super :: * ;
@@ -1398,10 +1408,12 @@ mod tests {
13981408 _sql_query : Option < SqlQuery > ,
13991409 _ctx : AuthContextRef ,
14001410 _meta_fields : LoadRequestMeta ,
1401- _schema : SchemaRef ,
1402- ) -> Result < V1LoadResponse , CubeError > {
1411+ schema : SchemaRef ,
1412+ member_fields : Vec < MemberField > ,
1413+ ) -> Result < Vec < RecordBatch > , CubeError > {
14031414 let response = r#"
1404- {
1415+ {
1416+ "results": [{
14051417 "annotation": {
14061418 "measures": [],
14071419 "dimensions": [],
@@ -1415,17 +1427,13 @@ mod tests {
14151427 {"KibanaSampleDataEcommerce.count": null, "KibanaSampleDataEcommerce.maxPrice": null, "KibanaSampleDataEcommerce.isBool": "true", "KibanaSampleDataEcommerce.orderDate": "9999-12-31 00:00:00.000", "KibanaSampleDataEcommerce.city": "City 4"},
14161428 {"KibanaSampleDataEcommerce.count": null, "KibanaSampleDataEcommerce.maxPrice": null, "KibanaSampleDataEcommerce.isBool": "false", "KibanaSampleDataEcommerce.orderDate": null, "KibanaSampleDataEcommerce.city": null}
14171429 ]
1418- }
1430+ }]
1431+ }
14191432 "# ;
14201433
1421- let result: V1LoadResult = serde_json:: from_str ( response) . unwrap ( ) ;
1422-
1423- Ok ( V1LoadResponse {
1424- pivot_query : None ,
1425- slow_query : None ,
1426- query_type : None ,
1427- results : vec ! [ result] ,
1428- } )
1434+ let result: V1LoadResponse = serde_json:: from_str ( response) . unwrap ( ) ;
1435+ convert_transport_response ( result, schema. clone ( ) , member_fields)
1436+ . map_err ( |err| CubeError :: user ( err. to_string ( ) ) )
14291437 }
14301438
14311439 async fn load_stream (
0 commit comments