@@ -1,7 +1,10 @@
-use crate::dataframe_query_common::{
-    ChunkInfo, align_record_batch_to_schema, compute_partition_stream_chunk_info,
-    prepend_string_column_schema,
-};
+use std::any::Any;
+use std::collections::BTreeMap;
+use std::fmt::Debug;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+
 use arrow::array::{Array, RecordBatch, RecordBatchOptions, StringArray};
 use arrow::compute::SortOptions;
 use arrow::datatypes::{Schema, SchemaRef};
@@ -17,28 +20,28 @@ use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType};
 use datafusion::physical_plan::{DisplayAs, DisplayFormatType, ExecutionPlan, PlanProperties};
 use datafusion::{error::DataFusionError, execution::SendableRecordBatchStream};
 use futures_util::{Stream, StreamExt as _};
+use tokio::runtime::Handle;
+use tokio::sync::Notify;
+use tokio::sync::mpsc::{Receiver, Sender};
+use tokio::task::JoinHandle;
+use tracing::Instrument as _;
+
 use re_dataframe::external::re_chunk::Chunk;
 use re_dataframe::external::re_chunk_store::ChunkStore;
 use re_dataframe::{
     ChunkStoreHandle, Index, QueryCache, QueryEngine, QueryExpression, QueryHandle, StorageEngine,
 };
-use re_grpc_client::ConnectionClient;
 use re_log_types::{ApplicationId, StoreId, StoreInfo, StoreKind, StoreSource};
 use re_protos::cloud::v1alpha1::DATASET_MANIFEST_ID_FIELD_NAME;
 use re_protos::cloud::v1alpha1::GetChunksRequest;
 use re_protos::common::v1alpha1::PartitionId;
+use re_redap_client::ConnectionClient;
 use re_sorbet::{ColumnDescriptor, ColumnSelector};
-use std::any::Any;
-use std::collections::BTreeMap;
-use std::fmt::Debug;
-use std::pin::Pin;
-use std::sync::Arc;
-use std::task::{Context, Poll};
-use tokio::runtime::Handle;
-use tokio::sync::Notify;
-use tokio::sync::mpsc::{Receiver, Sender};
-use tokio::task::JoinHandle;
-use tracing::Instrument as _;
+
+use crate::dataframe_query_common::{
+    ChunkInfo, align_record_batch_to_schema, compute_partition_stream_chunk_info,
+    prepend_string_column_schema,
+};
 
 /// This parameter sets the back pressure that either the streaming provider
 /// can place on the CPU worker thread or the CPU worker thread can place on
@@ -455,7 +458,7 @@ async fn chunk_stream_io_loop(
 
     // Then we need to fully decode these chunks, i.e. both the transport layer (Protobuf)
    // and the app layer (Arrow).
-    let mut chunk_stream = re_grpc_client::get_chunks_response_to_chunk_and_partition_id(
+    let mut chunk_stream = re_redap_client::get_chunks_response_to_chunk_and_partition_id(
         get_chunks_response_stream,
     );
 
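
The doc comment introduced in the first hunk describes a parameter that bounds how far the streaming (IO) side and the CPU worker can run ahead of each other. The diff does not show the mechanism itself, but the newly added `tokio::sync::mpsc::{Receiver, Sender}` imports suggest a bounded channel. Below is a minimal sketch of that back-pressure pattern under that assumption; `CHANNEL_CAPACITY`, `io_loop`, and `cpu_worker` are illustrative names, not identifiers from this PR:

```rust
use tokio::sync::mpsc;

/// Hypothetical stand-in for the back-pressure parameter the doc comment
/// describes; not the actual constant from this PR.
const CHANNEL_CAPACITY: usize = 4;

/// Producer side, e.g. an IO loop streaming encoded chunks.
async fn io_loop(tx: mpsc::Sender<Vec<u8>>) {
    for i in 0..16u8 {
        // `send` suspends once CHANNEL_CAPACITY items are in flight,
        // so a slow consumer back-pressures this producer.
        if tx.send(vec![i; 8]).await.is_err() {
            break; // Receiver was dropped; stop producing.
        }
    }
}

/// Consumer side, e.g. a CPU worker decoding chunks.
async fn cpu_worker(mut rx: mpsc::Receiver<Vec<u8>>) {
    while let Some(buf) = rx.recv().await {
        // Stand-in for CPU-bound decoding work; draining the channel
        // slowly is what throttles the producer above.
        let _checksum: u32 = buf.iter().map(|b| u32::from(*b)).sum();
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel::<Vec<u8>>(CHANNEL_CAPACITY);
    let producer = tokio::spawn(io_loop(tx));
    let consumer = tokio::spawn(cpu_worker(rx));
    let _ = tokio::join!(producer, consumer);
}
```

The same capacity bounds both directions: a full channel stalls the IO loop, while an empty one parks the CPU worker, which matches the "either ... can place on" phrasing of the doc comment.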