Skip to content

Commit f1f6e65

Browse files
tenfyzhonghongyunyan
authored and committed
Refactor: Remove unused debug log (#3157)
close #3156
1 parent b45049c commit f1f6e65

File tree

5 files changed

+258
-289
lines changed

5 files changed

+258
-289
lines changed

downstreamadapter/dispatcher/basic_dispatcher.go

Lines changed: 8 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ import (
2929
"github.com/pingcap/ticdc/pkg/errors"
3030
"github.com/pingcap/ticdc/pkg/sink/util"
3131
"go.uber.org/zap"
32+
"go.uber.org/zap/zapcore"
3233
)
3334

3435
// DispatcherService defines the interface for providing dispatcher information and basic event handling.
@@ -373,7 +374,13 @@ func (d *BasicDispatcher) handleEvents(dispatcherEvents []DispatcherEvent, wakeC
373374
latestResolvedTs := uint64(0)
374375
// Dispatcher is ready, handle the events
375376
for _, dispatcherEvent := range dispatcherEvents {
376-
// if log.GetLevel() == zapcore.DebugLevel {
377+
if log.GetLevel() == zapcore.DebugLevel {
378+
log.Debug("dispatcher receive all event",
379+
zap.Stringer("dispatcher", d.id), zap.Int64("mode", d.mode),
380+
zap.String("eventType", commonEvent.TypeToString(dispatcherEvent.Event.GetType())),
381+
zap.Any("event", dispatcherEvent.Event))
382+
}
383+
377384
failpoint.Inject("HandleEventsSlowly", func() {
378385
lag := time.Duration(rand.Intn(5000)) * time.Millisecond
379386
log.Warn("handle events slowly", zap.Duration("lag", lag))
@@ -392,22 +399,6 @@ func (d *BasicDispatcher) handleEvents(dispatcherEvents []DispatcherEvent, wakeC
392399
continue
393400
}
394401

395-
// if event.GetType() == commonEvent.TypeDMLEvent {
396-
// dml := event.(*commonEvent.DMLEvent)
397-
// for i, rowType := range dml.RowTypes {
398-
// rowKey := dml.RowKeys[i]
399-
// log.Info("dispatcher receive dml row",
400-
// zap.Stringer("dispatcher", d.id),
401-
// zap.Int64("mode", d.mode),
402-
// zap.String("eventType", commonEvent.TypeToString(event.GetType())),
403-
// zap.Uint64("commitTs", event.GetCommitTs()),
404-
// zap.Uint64("startTs", event.GetStartTs()),
405-
// zap.Int("rowType", int(rowType)),
406-
// zap.String("hexKey", spanz.HexKey(rowKey)),
407-
// zap.Any("rawKey", rowKey))
408-
// }
409-
// }
410-
411402
// only when we receive the first event, we can regard the dispatcher begin syncing data
412403
// then turning into working status.
413404
if d.isFirstEvent(event) {

logservice/eventstore/event_store.go

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -229,9 +229,6 @@ type eventStore struct {
229229

230230
// compressionThreshold is the size in bytes above which a value will be compressed.
231231
compressionThreshold int
232-
233-
// iterIDCounter is used to generate unique id for eventStoreIter.
234-
iterIDCounter atomic.Uint64
235232
}
236233

237234
const (
@@ -863,7 +860,6 @@ func (e *eventStore) GetIterator(dispatcherID common.DispatcherID, dataRange com
863860
}
864861

865862
return &eventStoreIter{
866-
id: e.iterIDCounter.Add(1),
867863
tableSpan: stat.tableSpan,
868864
needCheckSpan: needCheckSpan,
869865
innerIter: iter,
@@ -1148,7 +1144,6 @@ func (e *eventStore) writeEvents(db *pebble.DB, events []eventWithCallback, enco
11481144
}
11491145

11501146
type eventStoreIter struct {
1151-
id uint64
11521147
tableSpan *heartbeatpb.TableSpan
11531148
// true when need check whether data from `innerIter` is in `tableSpan`
11541149
// (e.g. subscription span is not the same as dispatcher span)

pkg/sink/mysql/mysql_writer_dml.go

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -520,23 +520,6 @@ func (w *Writer) generateBatchSQLInSafeMode(events []*commonEvent.DMLEvent) ([]s
520520
if rowType == prevType {
521521
sql, values := w.generateNormalSQLs(events)
522522
log.Info("normal sql should be", zap.Any("sql", sql), zap.Any("values", values), zap.Int("writerID", w.id))
523-
for _, dml_event := range events {
524-
log.Info("originalDmlEvent", zap.String("dmlEvent", dml_event.String()), zap.Int("writerID", w.id))
525-
}
526-
indexs := tableInfo.GetPKIndex()
527-
for _, colID := range indexs {
528-
info, ok := tableInfo.GetColumnInfo(colID)
529-
log.Info("indexInfo", zap.Int64("colID", colID), zap.Int("writerID", w.id))
530-
if ok {
531-
log.Info("columnInfo", zap.Int64("colID", colID), zap.String("colName", info.Name.O), zap.Int("writerID", w.id))
532-
}
533-
}
534-
for index, rowChange := range rowChanges {
535-
if rowChange.RowType == common.RowTypeDelete {
536-
key := genKeyList(&rowChange.PreRow, tableInfo)
537-
log.Info("rowChangeKey", zap.Int("index", index), zap.ByteString("key", key), zap.Int("writerID", w.id))
538-
}
539-
}
540523
log.Panic("invalid row changes", zap.String("schemaName", tableInfo.GetSchemaName()),
541524
zap.String("tableName", tableInfo.GetTableName()), zap.Any("rowChanges", rowChanges),
542525
zap.Any("prevType", prevType), zap.Any("currentType", rowType), zap.Int("writerID", w.id))

tests/integration_tests/run_heavy_it_in_ci.sh

Lines changed: 126 additions & 126 deletions
Original file line numberDiff line numberDiff line change
@@ -30,146 +30,146 @@ group_num=${group#G}
3030
# 12 CPU cores will be allocated to run each mysql heavy group in CI pipelines.
3131
mysql_groups=(
3232
# G00
33-
# 'generate_column many_pk_or_uk multi_source'
34-
# # G01
35-
# 'api_v2 ddl_for_split_tables_with_random_move_table'
36-
# # G02
37-
# 'availability ddl_for_split_tables_with_failover'
38-
# # G03
39-
# 'cdc move_table checkpoint_race_ddl_crash'
40-
# # G04
41-
# 'syncpoint syncpoint_check_ts'
42-
# # G05
43-
# 'ddl_for_split_tables_with_merge_and_split random_drop_message'
44-
# # G06
45-
# 'ddl_for_split_tables_with_random_merge_and_split'
46-
# # G07
47-
# # 'consistent_partition_table consistent_replicate_gbk consistent_replicate_ddl'
48-
# 'complex_transaction'
49-
# # G08
50-
# 'default_value http_proxies bank ddl_for_split_tables_random_schedule'
51-
# # G09
52-
# 'resolve_lock merge_table drop_many_tables'
53-
# # G10
54-
# # 'consistent_replicate_nfs consistent_replicate_storage_file consistent_replicate_storage_file_large_value consistent_replicate_storage_s3'
55-
# 'multi_changefeeds ddl_wait'
56-
# # G11
57-
# 'ddl_reentrant force_replicate_table multi_source'
58-
# # G12
59-
# 'tidb_mysql_test ddl_with_random_move_table'
60-
# # G13
61-
# 'fail_over region_merge'
62-
# # G14
63-
# 'fail_over_ddl_mix'
64-
# # G15
65-
# 'fail_over_ddl_mix_with_syncpoint'
33+
'generate_column many_pk_or_uk multi_source'
34+
# G01
35+
'api_v2 ddl_for_split_tables_with_random_move_table'
36+
# G02
37+
'availability ddl_for_split_tables_with_failover'
38+
# G03
39+
'cdc move_table checkpoint_race_ddl_crash'
40+
# G04
41+
'syncpoint syncpoint_check_ts'
42+
# G05
43+
'ddl_for_split_tables_with_merge_and_split random_drop_message'
44+
# G06
45+
'ddl_for_split_tables_with_random_merge_and_split'
46+
# G07
47+
# 'consistent_partition_table consistent_replicate_gbk consistent_replicate_ddl'
48+
'complex_transaction'
49+
# G08
50+
'default_value http_proxies bank ddl_for_split_tables_random_schedule'
51+
# G09
52+
'resolve_lock merge_table drop_many_tables'
53+
# G10
54+
# 'consistent_replicate_nfs consistent_replicate_storage_file consistent_replicate_storage_file_large_value consistent_replicate_storage_s3'
55+
'multi_changefeeds ddl_wait'
56+
# G11
57+
'ddl_reentrant force_replicate_table multi_source'
58+
# G12
59+
'tidb_mysql_test ddl_with_random_move_table'
60+
# G13
61+
'fail_over region_merge'
62+
# G14
63+
'fail_over_ddl_mix'
64+
# G15
65+
'fail_over_ddl_mix_with_syncpoint'
6666
)
6767

6868
# 12 CPU cores will be allocated to run each kafka heavy group in CI pipelines.
6969
kafka_groups=(
7070
# G00
71-
# 'generate_column many_pk_or_uk'
72-
# # G01
73-
# 'canal_json_basic canal_json_claim_check canal_json_content_compatible ddl_for_split_tables_with_random_move_table'
74-
# # G02
75-
# 'canal_json_handle_key_only ddl_for_split_tables_with_failover'
76-
# # G03
77-
# 'canal_json_adapter_compatibility ddl_for_split_tables_with_merge_and_split'
78-
# # G04
79-
# 'open_protocol_claim_check open_protocol_handle_key_only random_drop_message'
80-
# # G05
81-
# 'move_table drop_many_tables checkpoint_race_ddl_crash'
82-
# # G06
83-
# 'cdc default_value ddl_for_split_tables_with_random_merge_and_split'
84-
# # G07
85-
# 'merge_table resolve_lock force_replicate_table'
86-
# # G08
87-
# 'kafka_simple_claim_check kafka_simple_claim_check_avro tidb_mysql_test'
88-
# # G09
89-
# 'kafka_simple_handle_key_only kafka_simple_handle_key_only_avro mq_sink_error_resume multi_source'
90-
# # G10
91-
# 'kafka_column_selector kafka_column_selector_avro ddl_with_random_move_table'
92-
# # G11
93-
# 'fail_over region_merge multi_changefeeds'
94-
# # G12
95-
# 'ddl_for_split_tables_random_schedule'
96-
# # G13
97-
# 'debezium01 fail_over_ddl_mix'
98-
# # G14
99-
# 'debezium02'
100-
# # G15
101-
# 'debezium03'
71+
'generate_column many_pk_or_uk'
72+
# G01
73+
'canal_json_basic canal_json_claim_check canal_json_content_compatible ddl_for_split_tables_with_random_move_table'
74+
# G02
75+
'canal_json_handle_key_only ddl_for_split_tables_with_failover'
76+
# G03
77+
'canal_json_adapter_compatibility ddl_for_split_tables_with_merge_and_split'
78+
# G04
79+
'open_protocol_claim_check open_protocol_handle_key_only random_drop_message'
80+
# G05
81+
'move_table drop_many_tables checkpoint_race_ddl_crash'
82+
# G06
83+
'cdc default_value ddl_for_split_tables_with_random_merge_and_split'
84+
# G07
85+
'merge_table resolve_lock force_replicate_table'
86+
# G08
87+
'kafka_simple_claim_check kafka_simple_claim_check_avro tidb_mysql_test'
88+
# G09
89+
'kafka_simple_handle_key_only kafka_simple_handle_key_only_avro mq_sink_error_resume multi_source'
90+
# G10
91+
'kafka_column_selector kafka_column_selector_avro ddl_with_random_move_table'
92+
# G11
93+
'fail_over region_merge multi_changefeeds'
94+
# G12
95+
'ddl_for_split_tables_random_schedule'
96+
# G13
97+
'debezium01 fail_over_ddl_mix'
98+
# G14
99+
'debezium02'
100+
# G15
101+
'debezium03'
102102
)
103103

104104
# 12 CPU cores will be allocated to run each pulsar heavy group in CI pipelines.
105105
pulsar_groups=(
106106
# G00
107-
# 'generate_column many_pk_or_uk multi_source'
108-
# # G01
109-
# 'canal_json_basic canal_json_claim_check canal_json_content_compatible ddl_for_split_tables_with_random_move_table'
110-
# # G02
111-
# 'canal_json_handle_key_only ddl_for_split_tables_with_failover'
112-
# # G03
113-
# 'canal_json_adapter_compatibility ddl_for_split_tables_with_merge_and_split'
114-
# # G04
115-
# 'open_protocol_claim_check open_protocol_handle_key_only'
116-
# # G05
117-
# 'move_table drop_many_tables checkpoint_race_ddl_crash'
118-
# # G06
119-
# 'cdc default_value ddl_for_split_tables_with_random_merge_and_split'
120-
# # G07
121-
# 'merge_table resolve_lock force_replicate_table'
122-
# # G08
123-
# 'tidb_mysql_test'
124-
# # G09
125-
# 'mq_sink_error_resume'
126-
# # G10
127-
# 'ddl_for_split_tables_random_schedule'
128-
# # G11
129-
# 'ddl_with_random_move_table'
130-
# # G12
131-
# 'fail_over region_merge multi_changefeeds'
132-
# # G13
133-
# 'debezium01 fail_over_ddl_mix'
134-
# # G14
135-
# 'debezium02'
136-
# # G15
137-
# 'debezium03'
107+
'generate_column many_pk_or_uk multi_source'
108+
# G01
109+
'canal_json_basic canal_json_claim_check canal_json_content_compatible ddl_for_split_tables_with_random_move_table'
110+
# G02
111+
'canal_json_handle_key_only ddl_for_split_tables_with_failover'
112+
# G03
113+
'canal_json_adapter_compatibility ddl_for_split_tables_with_merge_and_split'
114+
# G04
115+
'open_protocol_claim_check open_protocol_handle_key_only'
116+
# G05
117+
'move_table drop_many_tables checkpoint_race_ddl_crash'
118+
# G06
119+
'cdc default_value ddl_for_split_tables_with_random_merge_and_split'
120+
# G07
121+
'merge_table resolve_lock force_replicate_table'
122+
# G08
123+
'tidb_mysql_test'
124+
# G09
125+
'mq_sink_error_resume'
126+
# G10
127+
'ddl_for_split_tables_random_schedule'
128+
# G11
129+
'ddl_with_random_move_table'
130+
# G12
131+
'fail_over region_merge multi_changefeeds'
132+
# G13
133+
'debezium01 fail_over_ddl_mix'
134+
# G14
135+
'debezium02'
136+
# G15
137+
'debezium03'
138138
)
139139

140140
storage_groups=(
141141
# G00
142-
# 'generate_column many_pk_or_uk multi_source'
143-
# # G01
144-
# 'csv_storage_update_pk_clustered csv_storage_update_pk_nonclustered'
145-
# # G02
146-
# 'canal_json_storage_basic canal_json_storage_partition_table'
147-
# # G03
148-
# 'csv_storage_basic storage_csv_update'
149-
# # G04
150-
# 'ddl_for_split_tables_with_random_move_table'
151-
# # G05
152-
# 'move_table drop_many_tables'
153-
# # G06
154-
# 'cdc default_value checkpoint_race_ddl_crash'
155-
# # G07
156-
# 'merge_table resolve_lock force_replicate_table'
157-
# # G08
158-
# 'tidb_mysql_test'
159-
# # G09
160-
# 'ddl_for_split_tables_with_merge_and_split'
161-
# # G10
162-
# 'ddl_for_split_tables_with_random_merge_and_split'
163-
# # G11
164-
# 'ddl_for_split_tables_random_schedule'
165-
# # G12
166-
# 'ddl_with_random_move_table'
167-
# # G13
168-
# 'fail_over region_merge multi_changefeeds'
169-
# # G14
170-
# 'fail_over_ddl_mix'
171-
# # G15
172-
# 'random_drop_message'
142+
'generate_column many_pk_or_uk multi_source'
143+
# G01
144+
'csv_storage_update_pk_clustered csv_storage_update_pk_nonclustered'
145+
# G02
146+
'canal_json_storage_basic canal_json_storage_partition_table'
147+
# G03
148+
'csv_storage_basic storage_csv_update'
149+
# G04
150+
'ddl_for_split_tables_with_random_move_table'
151+
# G05
152+
'move_table drop_many_tables'
153+
# G06
154+
'cdc default_value checkpoint_race_ddl_crash'
155+
# G07
156+
'merge_table resolve_lock force_replicate_table'
157+
# G08
158+
'tidb_mysql_test'
159+
# G09
160+
'ddl_for_split_tables_with_merge_and_split'
161+
# G10
162+
'ddl_for_split_tables_with_random_merge_and_split'
163+
# G11
164+
'ddl_for_split_tables_random_schedule'
165+
# G12
166+
'ddl_with_random_move_table'
167+
# G13
168+
'fail_over region_merge multi_changefeeds'
169+
# G14
170+
'fail_over_ddl_mix'
171+
# G15
172+
'random_drop_message'
173173
)
174174

175175
# Source shared functions and check test coverage

0 commit comments

Comments (0)