@@ -7,7 +7,7 @@ use crate::metastore::{MetaStore, RowKey, TableId};
 use crate::queryplanner::trace_data_loaded::DataLoadedSize;
 use crate::store::compaction::CompactionService;
 use crate::store::ChunkDataStore;
-use crate::CubeError;
+use crate::{app_metrics, CubeError};
 use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
 use std::sync::Arc;
@@ -117,10 +117,16 @@ impl JobIsolatedProcessor {
                     let compaction_service = self.compaction_service.clone();
                     let partition_id = *partition_id;
                     let data_loaded_size = DataLoadedSize::new();
+                    app_metrics::JOBS_PARTITION_COMPACTION.add(1);
                     let r = compaction_service
                         .compact(partition_id, data_loaded_size.clone())
                         .await;
-                    r?;
+                    if let Err(e) = r {
+                        app_metrics::JOBS_PARTITION_COMPACTION_FAILURES.add(1);
+                        return Err(e);
+                    }
+                    app_metrics::JOBS_PARTITION_COMPACTION_COMPLETED.add(1);
+
                     Ok(JobProcessResult::new(data_loaded_size.get()))
                 } else {
                     Self::fail_job_row_key(job)
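
Each hunk in this change applies the same three-counter pattern: bump an attempt counter before the call, a `*_FAILURES` counter when the call returns an error, and a `*_COMPLETED` counter on success. Below is a minimal, self-contained sketch of that pattern as a reusable helper, for illustration only; the `Counter` stand-in and the `counted` function are hypothetical and are not part of this change, which inlines the counters at each call site instead.

```rust
use std::future::Future;
use std::sync::atomic::{AtomicU64, Ordering};

// Stand-in counter type for illustration; the real code uses the crate's
// app_metrics counters, which expose a similar `add` method.
pub struct Counter(AtomicU64);

impl Counter {
    pub const fn new() -> Self {
        Counter(AtomicU64::new(0))
    }

    pub fn add(&self, n: u64) {
        self.0.fetch_add(n, Ordering::Relaxed);
    }
}

// Hypothetical helper: count an attempt, then either a failure or a
// completion, around any fallible async operation.
pub async fn counted<T, E, F>(
    attempted: &Counter,
    failures: &Counter,
    completed: &Counter,
    op: F,
) -> Result<T, E>
where
    F: Future<Output = Result<T, E>>,
{
    attempted.add(1);
    match op.await {
        Ok(v) => {
            completed.add(1);
            Ok(v)
        }
        Err(e) => {
            failures.add(1);
            Err(e)
        }
    }
}
```

A call site in the compaction branch above would then read roughly as `counted(&ATTEMPTED, &FAILURES, &COMPLETED, compaction_service.compact(partition_id, data_loaded_size.clone())).await?`, with hypothetical counter statics in place of the inlined `app_metrics` calls.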
@@ -130,7 +136,13 @@ impl JobIsolatedProcessor {
                 if let RowKey::Table(TableId::MultiPartitions, id) = job.row_reference() {
                     let compaction_service = self.compaction_service.clone();
                     let id = *id;
-                    compaction_service.split_multi_partition(id).await?;
+                    app_metrics::JOBS_MULTI_PARTITION_SPLIT.add(1);
+                    let r = compaction_service.split_multi_partition(id).await;
+                    if let Err(e) = r {
+                        app_metrics::JOBS_MULTI_PARTITION_SPLIT_FAILURES.add(1);
+                        return Err(e);
+                    }
+                    app_metrics::JOBS_MULTI_PARTITION_SPLIT_COMPLETED.add(1);
                     Ok(JobProcessResult::default())
                 } else {
                     Self::fail_job_row_key(job)
@@ -143,9 +155,15 @@ impl JobIsolatedProcessor {
                     let compaction_service = self.compaction_service.clone();
                     let multi_part_id = *multi_part_id;
                     for p in meta_store.find_unsplit_partitions(multi_part_id).await? {
-                        compaction_service
+                        app_metrics::JOBS_FINISH_MULTI_SPLIT.add(1);
+                        let r = compaction_service
                             .finish_multi_split(multi_part_id, p)
-                            .await?
+                            .await;
+                        if let Err(e) = r {
+                            app_metrics::JOBS_FINISH_MULTI_SPLIT_FAILURES.add(1);
+                            return Err(e);
+                        }
+                        app_metrics::JOBS_FINISH_MULTI_SPLIT_COMPLETED.add(1);
                     }

                     Ok(JobProcessResult::default())
@@ -196,9 +214,16 @@ impl JobIsolatedProcessor {
                     ));
                 }
                 let data_loaded_size = DataLoadedSize::new();
-                self.chunk_store
+                app_metrics::JOBS_REPARTITION_CHUNK.add(1);
+                let r = self
+                    .chunk_store
                     .repartition_chunk(chunk_id, data_loaded_size.clone())
-                    .await?;
+                    .await;
+                if let Err(e) = r {
+                    app_metrics::JOBS_REPARTITION_CHUNK_FAILURES.add(1);
+                    return Err(e);
+                }
+                app_metrics::JOBS_REPARTITION_CHUNK_COMPLETED.add(1);
                 Ok(JobProcessResult::new(data_loaded_size.get()))
             } else {
                 Self::fail_job_row_key(job)
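
The new `app_metrics::JOBS_*` statics referenced above are assumed to be declared alongside the crate's existing counters; that part of the change is not shown in this section. A sketch of what those declarations might look like, assuming a `metrics::counter` const helper and metric name strings chosen for illustration:

```rust
// Hypothetical additions to app_metrics: the static names come from this diff,
// but the `metrics::counter` helper and the metric name strings are assumptions.
use crate::util::metrics;
use crate::util::metrics::Counter;

pub static JOBS_PARTITION_COMPACTION: Counter = metrics::counter("jobs.partition_compaction");
pub static JOBS_PARTITION_COMPACTION_FAILURES: Counter =
    metrics::counter("jobs.partition_compaction.failures");
pub static JOBS_PARTITION_COMPACTION_COMPLETED: Counter =
    metrics::counter("jobs.partition_compaction.completed");
// ...and likewise for JOBS_MULTI_PARTITION_SPLIT, JOBS_FINISH_MULTI_SPLIT and
// JOBS_REPARTITION_CHUNK, each with _FAILURES and _COMPLETED variants.
```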