Skip to content

Commit 82587bd

Browse files
authored
chore: improve the log prefix (#18211)
1 parent 780f484 commit 82587bd

File tree

13 files changed

+59
-39
lines changed

13 files changed

+59
-39
lines changed

src/query/service/src/catalogs/default/mutable_catalog.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -307,7 +307,7 @@ impl Catalog for MutableCatalog {
307307
// Create database.
308308
let res = self.ctx.meta.create_database(req.clone()).await?;
309309
info!(
310-
"db name: {}, engine: {}",
310+
"[CATALOG] Creating database: name={}, engine={}",
311311
req.name_ident.database_name(),
312312
&req.meta.engine
313313
);
@@ -698,13 +698,13 @@ impl Catalog for MutableCatalog {
698698
}
699699

700700
info!(
701-
"updating multi table meta. number of tables: {}",
701+
"[CATALOG] Updating multiple table metadata: table_count={}",
702702
req.update_table_metas.len()
703703
);
704704
let begin = Instant::now();
705705
let res = self.ctx.meta.update_multi_table_meta(req).await;
706706
info!(
707-
"update multi table meta done. time used {:?}",
707+
"[CATALOG] Multiple table metadata update completed: elapsed_time={:?}",
708708
begin.elapsed()
709709
);
710710
Ok(res?)

src/query/service/src/interpreters/hook/compact_hook.rs

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,10 @@ pub async fn hook_compact(
6666
) {
6767
let op_name = trace_ctx.operation_name.clone();
6868
if let Err(e) = do_hook_compact(ctx, pipeline, compact_target, trace_ctx, lock_opt).await {
69-
info!("compact hook ({}) with error (ignored): {}", op_name, e);
69+
info!(
70+
"[COMPACT-HOOK] Operation {} failed with error (ignored): {}",
71+
op_name, e
72+
);
7073
}
7174
}
7275

@@ -86,13 +89,13 @@ async fn do_hook_compact(
8689
if info.res.is_ok() {
8790
let op_name = &trace_ctx.operation_name;
8891
metrics_inc_compact_hook_main_operation_time_ms(op_name, trace_ctx.start.elapsed().as_millis() as u64);
89-
info!("execute {op_name} finished successfully. running table optimization job.");
92+
info!("[COMPACT-HOOK] Operation {op_name} completed successfully, starting table optimization job.");
9093

9194
let compact_start_at = Instant::now();
9295
let compaction_limits = match compact_target.mutation_kind {
9396
MutationKind::Insert => {
9497
let compaction_num_block_hint = ctx.get_compaction_num_block_hint(&compact_target.table);
95-
info!("table {} hint number of blocks need to be compacted {}", compact_target.table, compaction_num_block_hint);
98+
info!("[COMPACT-HOOK] Table {} requires compaction of {} blocks", compact_target.table, compaction_num_block_hint);
9699
if compaction_num_block_hint == 0 {
97100
return Ok(());
98101
}
@@ -120,9 +123,9 @@ async fn do_hook_compact(
120123
compact_table(ctx, compact_target, compaction_limits, lock_opt)
121124
}) {
122125
Ok(_) => {
123-
info!("execute {op_name} finished successfully. table optimization job finished.");
126+
info!("[COMPACT-HOOK] Operation {op_name} and table optimization job completed successfully.");
124127
}
125-
Err(e) => { info!("execute {op_name} finished successfully. table optimization job failed. {:?}", e); }
128+
Err(e) => { info!("[COMPACT-HOOK] Operation {op_name} completed but table optimization job failed: {:?}", e); }
126129
}
127130

128131
// reset the progress value

src/query/service/src/interpreters/hook/hook.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -76,12 +76,12 @@ impl HookOperator {
7676
pub async fn execute_compact(&self, pipeline: &mut Pipeline) {
7777
match self.ctx.get_settings().get_enable_compact_after_write() {
7878
Ok(false) => {
79-
info!("auto compaction disabled");
79+
info!("[TABLE-HOOK] Auto compaction is disabled");
8080
return;
8181
}
8282
Err(e) => {
8383
// swallow the exception, compaction hook should not prevent the main operation.
84-
warn!("failed to get compaction settings, ignored. {}", e);
84+
warn!("[TABLE-HOOK] Failed to retrieve compaction settings, continuing without compaction: {}", e);
8585
return;
8686
}
8787
Ok(true) => {

src/query/service/src/interpreters/hook/refresh_hook.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -63,13 +63,13 @@ pub async fn hook_refresh(ctx: Arc<QueryContext>, pipeline: &mut Pipeline, desc:
6363

6464
pipeline.set_on_finished(move |info: &ExecutionInfo| {
6565
if info.res.is_ok() {
66-
info!("execute pipeline finished successfully, starting run refresh job.");
66+
info!("[REFRESH-HOOK] Pipeline execution completed successfully, starting refresh job");
6767
match GlobalIORuntime::instance().block_on(do_refresh(ctx, desc)) {
6868
Ok(_) => {
69-
info!("execute refresh job successfully.");
69+
info!("[REFRESH-HOOK] Refresh job completed successfully");
7070
}
7171
Err(e) => {
72-
info!("execute refresh job failed. {:?}", e);
72+
info!("[REFRESH-HOOK] Refresh job failed: {:?}", e);
7373
}
7474
}
7575
}

src/query/service/src/interpreters/hook/vacuum_hook.rs

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ pub fn hook_vacuum_temp_files(query_ctx: &Arc<QueryContext>) -> Result<()> {
6161
}
6262

6363
log::info!(
64-
"Vacuum temporary files by hook, node files: {:?}",
64+
"[VACUUM-HOOK] Cleaning temporary files from nodes: {:?}",
6565
node_files
6666
);
6767

@@ -77,7 +77,7 @@ pub fn hook_vacuum_temp_files(query_ctx: &Arc<QueryContext>) -> Result<()> {
7777
.await;
7878

7979
if let Err(cause) = &removed_files {
80-
log::warn!("Vacuum temporary files has error: {:?}", cause);
80+
log::warn!("[VACUUM-HOOK] Failed to clean temporary files: {:?}", cause);
8181
}
8282

8383
Ok(())
@@ -96,7 +96,10 @@ pub fn hook_disk_temp_dir(query_ctx: &Arc<QueryContext>) -> Result<()> {
9696
.get_spilling_to_disk_vacuum_unknown_temp_dirs_limit()?;
9797
let deleted = mgr.drop_disk_spill_dir_unknown(limit)?;
9898
if !deleted.is_empty() {
99-
warn!("Deleted residual temporary directories: {:?}", deleted)
99+
warn!(
100+
"[VACUUM-HOOK] Removed residual temporary directories: {:?}",
101+
deleted
102+
)
100103
}
101104
}
102105

src/query/service/src/servers/http/middleware/session.rs

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -617,7 +617,7 @@ impl<E: Endpoint> Endpoint for HTTPSessionEndpoint<E> {
617617
} else {
618618
let msg =
619619
format!("sticky_node_id '{sticky_node_id}' not found in cluster",);
620-
warn!("{}", msg);
620+
warn!("[HTTP-SESSION] {}", msg);
621621
Err(Error::from(HttpErrorCode::bad_request(
622622
ErrorCode::BadArguments(msg),
623623
)))
@@ -650,7 +650,7 @@ impl<E: Endpoint> Endpoint for HTTPSessionEndpoint<E> {
650650
}
651651
Ok(None) => {
652652
let msg = format!("Not find the '{}' warehouse; it is possible that all nodes of the warehouse have gone offline. Please exit the client and reconnect, or use `use warehouse <new_warehouse>`", warehouse);
653-
warn!("{}", msg);
653+
warn!("[HTTP-SESSION] {}", msg);
654654
return Err(Error::from(HttpErrorCode::bad_request(
655655
ErrorCode::UnknownWarehouse(msg),
656656
)));
@@ -671,7 +671,9 @@ impl<E: Endpoint> Endpoint for HTTPSessionEndpoint<E> {
671671
}
672672
}
673673

674-
log::warn!("Ignore header ({HEADER_WAREHOUSE}: {warehouse:?})");
674+
log::warn!(
675+
"[HTTP-SESSION] Ignoring warehouse header: {HEADER_WAREHOUSE}={warehouse:?}"
676+
);
675677
}
676678
};
677679

@@ -706,13 +708,13 @@ impl<E: Endpoint> Endpoint for HTTPSessionEndpoint<E> {
706708
let err = HttpErrorCode::error_code(err);
707709
if err.status() == StatusCode::UNAUTHORIZED {
708710
warn!(
709-
"http auth failure: {method} {uri}, headers={:?}, error={}",
711+
"[HTTP-SESSION] Authentication failure: {method} {uri}, headers={:?}, error={}",
710712
sanitize_request_headers(&headers),
711713
err
712714
);
713715
} else {
714716
error!(
715-
"http request err: {method} {uri}, headers={:?}, error={}",
717+
"[HTTP-SESSION] Request error: {method} {uri}, headers={:?}, error={}",
716718
sanitize_request_headers(&headers),
717719
err
718720
);

src/query/service/src/servers/http/v1/query/http_query.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -807,7 +807,7 @@ impl HttpQuery {
807807
warnings: query_context.pop_warnings(),
808808
};
809809

810-
info!(
810+
error!(
811811
"[HTTP-QUERY] Query state changed to Stopped, failed to start: {:?}",
812812
e
813813
);

src/query/settings/src/settings_global.rs

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -108,15 +108,21 @@ impl Settings {
108108
.insert(name.clone(), match default_settings.settings.get(&name) {
109109
None => {
110110
// the settings may be deprecated
111-
warn!("Ignore deprecated global setting {} = {}", name, val);
111+
warn!(
112+
"[SETTINGS] Ignoring deprecated global setting: {} = {}",
113+
name, val
114+
);
112115
continue;
113116
}
114117
Some(default_setting_value) => {
115118
if DefaultSettings::check_setting_scope(&name, SettingScope::Global)
116119
.is_err()
117120
{
118121
// the settings is session only, ignore the global setting
119-
warn!("Ignore session only global setting {} = {}", name, val);
122+
warn!(
123+
"[SETTINGS] Ignoring session-only setting at global scope: {} = {}",
124+
name, val
125+
);
120126
continue;
121127
}
122128
match &default_setting_value.value {

src/query/sql/src/planner/binder/table.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -366,7 +366,7 @@ impl Binder {
366366
let columns = self.metadata.read().columns_by_table_index(table_index);
367367
let scan_id = self.metadata.write().next_scan_id();
368368
log::info!(
369-
"[RUNTIME-FILTER]bind_base_table scan_id: {},table_entry: {:?}",
369+
"[RUNTIME-FILTER] bind_base_table scan_id: {},table_entry: {:?}",
370370
scan_id,
371371
table
372372
);

src/query/storages/fuse/src/operations/common/processors/sink_commit.rs

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -288,7 +288,10 @@ where F: SnapshotGenerator + Send + Sync + 'static
288288
// Vacuum in a best-effort manner, errors are ignored
289289
warn!("Vacuum table {} failed : {}", tbl.table_info.name, e);
290290
} else {
291-
info!("vacuum table {} done", tbl.table_info.name);
291+
info!(
292+
"[SINK-COMMIT] Vacuum completed for table {}",
293+
tbl.table_info.name
294+
);
292295
}
293296
}
294297

@@ -307,7 +310,7 @@ where F: SnapshotGenerator + Send + Sync + 'static
307310
if let Some(vacuum_handler) = &self.vacuum_handler {
308311
self.exec_auto_vacuum2(tbl, vacuum_handler.as_ref()).await;
309312
} else {
310-
info!("no vacuum handler found for auto vacuuming, please re-check your license");
313+
info!("[SINK-COMMIT] No vacuum handler available for auto vacuuming, please verify your license");
311314
}
312315

313316
Ok(())
@@ -530,7 +533,7 @@ where F: SnapshotGenerator + Send + Sync + 'static
530533
{
531534
let elapsed_time = self.start_time.elapsed();
532535
let status = format!(
533-
"commit mutation success after {} retries, which took {:?}",
536+
"[SINK-COMMIT] Mutation committed successfully after {} retries in {:?}",
534537
self.retries, elapsed_time
535538
);
536539
metrics_inc_commit_milliseconds(elapsed_time.as_millis());
@@ -554,7 +557,10 @@ where F: SnapshotGenerator + Send + Sync + 'static
554557
.collect::<Vec<_>>();
555558
(tbl, stream_descriptions)
556559
};
557-
info!("commit mutation success, targets {:?}", target_descriptions);
560+
info!(
561+
"[SINK-COMMIT] Mutation committed successfully, targets: {:?}",
562+
target_descriptions
563+
);
558564
self.state = State::Finish;
559565
}
560566
Err(e) if self.is_error_recoverable(&e) => {
@@ -563,7 +569,7 @@ where F: SnapshotGenerator + Send + Sync + 'static
563569
Some(d) => {
564570
let name = table_info.name.clone();
565571
debug!(
566-
"got error TableVersionMismatched, tx will be retried {} ms later. table name {}, identity {}",
572+
"[SINK-COMMIT] TableVersionMismatched error detected, transaction will retry in {} ms. Table: {}, ID: {}",
567573
d.as_millis(),
568574
name.as_str(),
569575
table_info.ident

0 commit comments

Comments (0)