Skip to content

Commit f9fabcd

Browse files
craig[bot] and jasonlmfong
committed
Merge #154670
154670: log: finalize structured event logging channel migration r=jasonlmfong a=jasonlmfong The following structured events should have their logging channels migrated: CreateChangefeed: TELEMETRY -> CHANGEFEED AlterChangefeed: TELEMETRY -> CHANGEFEED ChangefeedFailed: TELEMETRY -> CHANGEFEED ChangefeedCanceled: TELEMETRY -> CHANGEFEED ChangefeedEmittedBytes: TELEMETRY -> CHANGEFEED SampledQuery: TELEMETRY -> SQL_EXEC SampledTransaction: TELEMETRY -> SQL_EXEC SlowQuery: SQL_PERF -> SQL_EXEC LargeRow: SQL_PERF -> SQL_EXEC TxnRowsWrittenLimit: SQL_PERF -> SQL_EXEC TxnRowsReadLimit: SQL_PERF -> SQL_EXEC SlowQueryInternal: SQL_INTERNAL_PERF -> SQL_EXEC LargeRowInternal: SQL_INTERNAL_PERF -> SQL_EXEC TxnRowsWrittenLimitInternal: SQL_INTERNAL_PERF -> SQL_EXEC TxnRowsReadLimitInternal: SQL_INTERNAL_PERF -> SQL_EXEC This is achieved by setting default of `log.channel_compatibility_mode.enabled` to false This change also updates the protobuf documentation Epic: CRDB-53410 Release note (ops change): This change completes the migration of changefeed events, sample query/txn events, sql_perf events into their respective new logging channels. In order to keep using the old channels, users have to set the setting: `log.channel_compatibility_mode.enabled` to true. ---- log: fix missing logs for internal events previously the logs were only flowing for the non-internal events Epic: None Release note (bug fix): Previously, CockroachDB would not log events for TxnRowsRead and TxnRowsWritten guardrail for internal queries into SQL_INTERNAL_PERF channel. The bug has been present since 21.2 and has now been fixed. Co-authored-by: Jason Fong <[email protected]>
2 parents 9e89769 + 3369fee commit f9fabcd

16 files changed

+764
-769
lines changed

docs/generated/eventlog.md

Lines changed: 188 additions & 181 deletions
Large diffs are not rendered by default.

docs/generated/settings/settings-for-tenants.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -108,7 +108,7 @@ kv.transaction.write_pipelining.locking_reads.enabled boolean true if enabled, t
108108
kv.transaction.write_pipelining.ranged_writes.enabled boolean true if enabled, transactional ranged writes are pipelined through Raft consensus application
109109
kv.transaction.write_pipelining.enabled (alias: kv.transaction.write_pipelining_enabled) boolean true if enabled, transactional writes are pipelined through Raft consensus application
110110
kv.transaction.write_pipelining.max_batch_size (alias: kv.transaction.write_pipelining_max_batch_size) integer 128 if non-zero, defines that maximum size batch that will be pipelined through Raft consensus application
111-
log.channel_compatibility_mode.enabled boolean true when true, logs will continue to log to the expected logging channels; when false, logs will be moved to new logging channels as part of a logging channel consolidation effort application
111+
log.channel_compatibility_mode.enabled boolean false when true, logs will log to their legacy (pre 26.1) logging channels; when false, logs will be logged to new logging channels application
112112
obs.tablemetadata.automatic_updates.enabled boolean false enables automatic updates of the table metadata cache system.table_metadata application
113113
obs.tablemetadata.data_valid_duration duration 20m0s the duration for which the data in system.table_metadata is considered valid application
114114
schedules.backup.gc_protection.enabled boolean true enable chaining of GC protection across backups run as part of a schedule application

docs/generated/settings/settings.html

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@
139139
<tr><td><div id="setting-kv-transaction-write-pipelining-max-batch-size" class="anchored"><code>kv.transaction.write_pipelining.max_batch_size<br />(alias: kv.transaction.write_pipelining_max_batch_size)</code></div></td><td>integer</td><td><code>128</code></td><td>if non-zero, defines that maximum size batch that will be pipelined through Raft consensus</td><td>Basic/Standard/Advanced/Self-Hosted</td></tr>
140140
<tr><td><div id="setting-kvadmission-store-provisioned-bandwidth" class="anchored"><code>kvadmission.store.provisioned_bandwidth</code></div></td><td>byte size</td><td><code>0 B</code></td><td>if set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to true</td><td>Advanced/Self-Hosted</td></tr>
141141
<tr><td><div id="setting-kvadmission-store-snapshot-ingest-bandwidth-control-enabled" class="anchored"><code>kvadmission.store.snapshot_ingest_bandwidth_control.enabled</code></div></td><td>boolean</td><td><code>true</code></td><td>if set to true, snapshot ingests will be subject to disk write control in AC</td><td>Advanced/Self-Hosted</td></tr>
142-
<tr><td><div id="setting-log-channel-compatibility-mode-enabled" class="anchored"><code>log.channel_compatibility_mode.enabled</code></div></td><td>boolean</td><td><code>true</code></td><td>when true, logs will continue to log to the expected logging channels; when false, logs will be moved to new logging channels as part of a logging channel consolidation effort</td><td>Basic/Standard/Advanced/Self-Hosted</td></tr>
142+
<tr><td><div id="setting-log-channel-compatibility-mode-enabled" class="anchored"><code>log.channel_compatibility_mode.enabled</code></div></td><td>boolean</td><td><code>false</code></td><td>when true, logs will log to their legacy (pre 26.1) logging channels; when false, logs will be logged to new logging channels</td><td>Basic/Standard/Advanced/Self-Hosted</td></tr>
143143
<tr><td><div id="setting-obs-tablemetadata-automatic-updates-enabled" class="anchored"><code>obs.tablemetadata.automatic_updates.enabled</code></div></td><td>boolean</td><td><code>false</code></td><td>enables automatic updates of the table metadata cache system.table_metadata</td><td>Basic/Standard/Advanced/Self-Hosted</td></tr>
144144
<tr><td><div id="setting-obs-tablemetadata-data-valid-duration" class="anchored"><code>obs.tablemetadata.data_valid_duration</code></div></td><td>duration</td><td><code>20m0s</code></td><td>the duration for which the data in system.table_metadata is considered valid</td><td>Basic/Standard/Advanced/Self-Hosted</td></tr>
145145
<tr><td><div id="setting-schedules-backup-gc-protection-enabled" class="anchored"><code>schedules.backup.gc_protection.enabled</code></div></td><td>boolean</td><td><code>true</code></td><td>enable chaining of GC protection across backups run as part of a schedule</td><td>Basic/Standard/Advanced/Self-Hosted</td></tr>

pkg/ccl/changefeedccl/changefeed_stmt.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2317,7 +2317,7 @@ func getChangefeedEventMigrator(migrateEvent bool) log.StructuredEventMigrator {
23172317
func() bool {
23182318
return migrateEvent
23192319
},
2320-
channel.CHANGEFEED,
2320+
channel.TELEMETRY,
23212321
)
23222322
}
23232323

pkg/ccl/changefeedccl/changefeed_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12742,9 +12742,9 @@ func TestTableRenameDuringDatabaseLevelChangefeed(t *testing.T) {
1274212742

1274312743
func getChangefeedLoggingChannel(sv *settings.Values) logpb.Channel {
1274412744
if log.ShouldMigrateEvent(sv) {
12745-
return logpb.Channel_CHANGEFEED
12745+
return logpb.Channel_TELEMETRY
1274612746
}
12747-
return logpb.Channel_TELEMETRY
12747+
return logpb.Channel_CHANGEFEED
1274812748
}
1274912749

1275012750
func TestCreateTableLevelChangefeedWithDBPrivilege(t *testing.T) {

pkg/ccl/telemetryccl/telemetry_logging_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -167,10 +167,10 @@ type telemetrySpy struct {
167167

168168
func (l *telemetrySpy) channelsToIntercept() []log.Channel {
169169
if log.ShouldMigrateEvent(l.sv) {
170-
return []log.Channel{logpb.Channel_TELEMETRY, logpb.Channel_SQL_EXEC}
170+
return []log.Channel{logpb.Channel_TELEMETRY}
171171
}
172172

173-
return []log.Channel{logpb.Channel_TELEMETRY}
173+
return []log.Channel{logpb.Channel_TELEMETRY, logpb.Channel_SQL_EXEC}
174174
}
175175

176176
func (l *telemetrySpy) Intercept(entry []byte) {

pkg/sql/conn_executor_exec.go

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3097,6 +3097,7 @@ func (ex *connExecutor) handleTxnRowsGuardrails(
30973097
if shouldLog {
30983098
commonSQLEventDetails := ex.planner.getCommonSQLEventDetails()
30993099
var event logpb.EventPayload
3100+
var migrator log.StructuredEventMigrator
31003101
if ex.executorType == executorTypeInternal {
31013102
if isRead {
31023103
event = &eventpb.TxnRowsReadLimitInternal{
@@ -3109,6 +3110,9 @@ func (ex *connExecutor) handleTxnRowsGuardrails(
31093110
CommonTxnRowsLimitDetails: commonTxnRowsLimitDetails,
31103111
}
31113112
}
3113+
migrator = log.NewStructuredEventMigrator(func() bool {
3114+
return log.ShouldMigrateEvent(ex.planner.ExecCfg().SV())
3115+
}, logpb.Channel_SQL_INTERNAL_PERF)
31123116
} else {
31133117
if isRead {
31143118
event = &eventpb.TxnRowsReadLimit{
@@ -3121,12 +3125,12 @@ func (ex *connExecutor) handleTxnRowsGuardrails(
31213125
CommonTxnRowsLimitDetails: commonTxnRowsLimitDetails,
31223126
}
31233127
}
3124-
migrator := log.NewStructuredEventMigrator(func() bool {
3128+
migrator = log.NewStructuredEventMigrator(func() bool {
31253129
return log.ShouldMigrateEvent(ex.planner.ExecCfg().SV())
3126-
}, logpb.Channel_SQL_EXEC)
3127-
migrator.StructuredEvent(ctx, severity.INFO, event)
3128-
logCounter.Inc(1)
3130+
}, logpb.Channel_SQL_PERF)
31293131
}
3132+
migrator.StructuredEvent(ctx, severity.INFO, event)
3133+
logCounter.Inc(1)
31303134
}
31313135
if shouldErr {
31323136
if isRead {

pkg/sql/event_log_test.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -765,9 +765,9 @@ func TestPerfLogging(t *testing.T) {
765765
db.Exec(t, tc.query)
766766
}
767767

768-
expectedChannel := tc.channel
769-
if !log.ChannelCompatibilityModeEnabled.Get(&s.ClusterSettings().SV) {
770-
expectedChannel = logpb.Channel_SQL_EXEC
768+
expectedChannel := logpb.Channel_SQL_EXEC
769+
if log.ShouldMigrateEvent(&s.ClusterSettings().SV) {
770+
expectedChannel = tc.channel
771771
}
772772

773773
var logRe = regexp.MustCompile(tc.logRe)

pkg/sql/exec_log.go

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -252,16 +252,16 @@ func (p *planner) maybeLogStatementInternal(
252252
// Is the query actually slow?
253253
queryDuration > slowLogThreshold) {
254254
commonSQLEventDetails := p.getCommonSQLEventDetails()
255-
migrator := log.NewStructuredEventMigrator(func() bool {
256-
return !log.ChannelCompatibilityModeEnabled.Get(p.ExecCfg().SV())
257-
}, logpb.Channel_SQL_EXEC)
258255
switch {
259256
case execType == executorTypeExec:
260257
// Non-internal queries are always logged to the slow query log.
261258
event := &eventpb.SlowQuery{
262259
CommonSQLEventDetails: commonSQLEventDetails,
263260
CommonSQLExecDetails: execDetails,
264261
}
262+
migrator := log.NewStructuredEventMigrator(func() bool {
263+
return log.ShouldMigrateEvent(p.ExecCfg().SV())
264+
}, logpb.Channel_SQL_PERF)
265265
migrator.StructuredEvent(ctx, severity.INFO, event)
266266
case execType == executorTypeInternal && slowInternalQueryLogEnabled:
267267
// Internal queries that surpass the slow query log threshold should only
@@ -270,6 +270,9 @@ func (p *planner) maybeLogStatementInternal(
270270
CommonSQLEventDetails: commonSQLEventDetails,
271271
CommonSQLExecDetails: execDetails,
272272
}
273+
migrator := log.NewStructuredEventMigrator(func() bool {
274+
return log.ShouldMigrateEvent(p.ExecCfg().SV())
275+
}, logpb.Channel_SQL_INTERNAL_PERF)
273276
migrator.StructuredEvent(ctx, severity.INFO,
274277
event)
275278
}
@@ -435,7 +438,7 @@ func (p *planner) maybeLogStatementInternal(
435438

436439
migrator := log.NewStructuredEventMigrator(func() bool {
437440
return log.ShouldMigrateEvent(p.ExecCfg().SV())
438-
}, logpb.Channel_SQL_EXEC)
441+
}, logpb.Channel_TELEMETRY)
439442

440443
migrator.StructuredEvent(ctx, severity.INFO, sampledQuery)
441444
}
@@ -524,7 +527,7 @@ func (p *planner) logTransaction(
524527

525528
migrator := log.NewStructuredEventMigrator(func() bool {
526529
return log.ShouldMigrateEvent(p.ExecCfg().SV())
527-
}, logpb.Channel_SQL_EXEC)
530+
}, logpb.Channel_TELEMETRY)
528531

529532
migrator.StructuredEvent(ctx, severity.INFO, sampledTxn)
530533
}

pkg/sql/row/helper.go

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -470,14 +470,18 @@ func (rh *RowHelper) CheckRowSize(
470470
rh.metrics.MaxRowSizeLogCount.Inc(1)
471471
}
472472
var event logpb.EventPayload
473+
var migrator log.StructuredEventMigrator
473474
if rh.sd.Internal {
474475
event = &eventpb.LargeRowInternal{CommonLargeRowDetails: details}
476+
migrator = log.NewStructuredEventMigrator(func() bool {
477+
return rh.migrateLargeRowLog
478+
}, logpb.Channel_SQL_INTERNAL_PERF)
475479
} else {
476480
event = &eventpb.LargeRow{CommonLargeRowDetails: details}
481+
migrator = log.NewStructuredEventMigrator(func() bool {
482+
return rh.migrateLargeRowLog
483+
}, logpb.Channel_SQL_PERF)
477484
}
478-
migrator := log.NewStructuredEventMigrator(func() bool {
479-
return rh.migrateLargeRowLog
480-
}, logpb.Channel_SQL_EXEC)
481485
migrator.StructuredEvent(ctx, severity.INFO, event)
482486
}
483487
if shouldErr {

0 commit comments

Comments
 (0)