@@ -12,6 +12,7 @@ use crate::metastore::{
 use crate::queryplanner::merge_sort::LastRowByUniqueKeyExec;
 use crate::queryplanner::metadata_cache::MetadataCacheFactory;
 use crate::queryplanner::trace_data_loaded::{DataLoadedSize, TraceDataLoadedExec};
+use crate::queryplanner::QueryPlannerImpl;
 use crate::remotefs::{ensure_temp_file_is_dropped, RemoteFs};
 use crate::store::{min_max_values_from_data, ChunkDataStore, ChunkStore, ROW_GROUP_SIZE};
 use crate::table::data::{cmp_min_rows, cmp_partition_key};
@@ -190,11 +191,14 @@ impl CompactionServiceImpl {
         let deactivate_res = self
             .deactivate_and_mark_failed_chunks_for_replay(failed)
             .await;
+
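+        // Build one task context from the metadata cache factory's session
+        // config and share it between the in-memory and persistent paths.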
+        let task_context = QueryPlannerImpl::execution_context_helper(self.metadata_cache_factory.cache_factory().make_session_config()).task_ctx();
+
         let in_memory_res = self
-            .compact_chunks_to_memory(mem_chunks, &partition, &index, &table)
+            .compact_chunks_to_memory(mem_chunks, &partition, &index, &table, task_context.clone())
             .await;
         let persistent_res = self
-            .compact_chunks_to_persistent(persistent_chunks, &partition, &index, &table)
+            .compact_chunks_to_persistent(persistent_chunks, &partition, &index, &table, task_context)
             .await;
         deactivate_res?;
         in_memory_res?;
@@ -209,6 +213,7 @@ impl CompactionServiceImpl {
         partition: &IdRow<Partition>,
         index: &IdRow<Index>,
         table: &IdRow<Table>,
+        task_context: Arc<TaskContext>,
     ) -> Result<(), CubeError> {
         if chunks.is_empty() {
             return Ok(());
@@ -290,6 +295,7 @@ impl CompactionServiceImpl {
             in_memory_columns,
             unique_key.clone(),
             aggregate_columns.clone(),
+            task_context.clone(),
         )
         .await?;
         let batches = collect(batches_stream).await?;
@@ -337,6 +343,7 @@ impl CompactionServiceImpl {
         partition: &IdRow<Partition>,
         index: &IdRow<Index>,
         table: &IdRow<Table>,
+        task_context: Arc<TaskContext>,
     ) -> Result<(), CubeError> {
         if chunks.is_empty() {
             return Ok(());
@@ -381,6 +388,7 @@ impl CompactionServiceImpl {
             in_memory_columns,
             unique_key.clone(),
             aggregate_columns.clone(),
+            task_context,
         )
         .await?;
 
@@ -687,8 +695,9 @@ impl CompactionService for CompactionServiceImpl {
             IndexType::Regular => None,
             IndexType::Aggregate => Some(table.get_row().aggregate_columns()),
         };
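+        // Same pattern as in the compact path above: the task context is
+        // derived from the metadata cache factory's session config.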
+        let task_context = QueryPlannerImpl::execution_context_helper(self.metadata_cache_factory.cache_factory().make_session_config()).task_ctx();
         let records =
-            merge_chunks(key_size, main_table, new, unique_key, aggregate_columns).await?;
+            merge_chunks(key_size, main_table, new, unique_key, aggregate_columns, task_context).await?;
         let count_and_min = write_to_files(
             records,
             total_rows as usize,
@@ -890,6 +899,7 @@ impl CompactionService for CompactionServiceImpl {
             key_len,
             // TODO should it respect table partition_split_threshold?
             self.config.partition_split_threshold() as usize,
+            QueryPlannerImpl::execution_context_helper(self.metadata_cache_factory.cache_factory().make_session_config()).task_ctx(),
         )
         .await?;
         // There is no point if we cannot split the partition.
@@ -988,8 +998,9 @@ async fn find_partition_keys(
     p: AggregateExec,
     key_len: usize,
     rows_per_partition: usize,
+    context: Arc<TaskContext>,
 ) -> Result<Vec<Row>, CubeError> {
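+    // Execute with the caller-supplied task context instead of TaskContext::default().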
-    let mut s = p.execute(0, Arc::new(TaskContext::default()))?;
+    let mut s = p.execute(0, context)?;
     let mut points = Vec::new();
     let mut row_count = 0;
     while let Some(b) = s.next().await.transpose()? {
@@ -1364,6 +1375,7 @@ pub async fn merge_chunks(
     r: Vec<ArrayRef>,
     unique_key_columns: Option<Vec<&crate::metastore::Column>>,
     aggregate_columns: Option<Vec<AggregateColumn>>,
+    task_context: Arc<TaskContext>,
 ) -> Result<SendableRecordBatchStream, CubeError> {
     let schema = l.schema();
     let r = RecordBatch::try_new(schema.clone(), r)?;
@@ -1421,7 +1433,7 @@ pub async fn merge_chunks(
         )?);
     }
 
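+    // Drive the assembled plan with the shared task context.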
-    Ok(res.execute(0, Arc::new(TaskContext::default()))?)
+    Ok(res.execute(0, task_context)?)
 }
 
 pub async fn merge_replay_handles(
@@ -2331,6 +2343,7 @@ impl MultiSplit {
             ROW_GROUP_SIZE,
             self.metadata_cache_factory.clone(),
         );
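+        // Reuse a single task context for both the read_files and EmptyExec branches.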
+        let task_context = QueryPlannerImpl::execution_context_helper(self.metadata_cache_factory.cache_factory().make_session_config()).task_ctx();
         let records = if !in_files.is_empty() {
             read_files(
                 &in_files.into_iter().map(|(f, _)| f).collect::<Vec<_>>(),
@@ -2340,10 +2353,10 @@ impl MultiSplit {
                 Arc::new(store.arrow_schema()),
             )
             .await?
-            .execute(0, Arc::new(TaskContext::default()))?
+            .execute(0, task_context)?
         } else {
             EmptyExec::new(Arc::new(store.arrow_schema()))
-                .execute(0, Arc::new(TaskContext::default()))?
+                .execute(0, task_context)?
         };
         let row_counts = write_to_files_by_keys(
             records,