
Commit b9f4a61

Committed by craig[bot], RaduBerinde, andyyang890, rafiss, and jeffswenson
154025: util: add yamlutil r=RaduBerinde a=RaduBerinde

Add an `UnmarshalStrict()` wrapper to match the function of the same name in yaml.v2 and earlier. Also add a `Marshal()` that indents to two spaces.

Epic: none
Release note: None

154485: roachprod/opentelemetry: add new CDC 25.4 metrics to DRT clusters r=log-head,asg0451 a=andyyang890

This patch adds the following metrics to DRT clusters:
* changefeed.progress_skew.span
* changefeed.progress_skew.table
* changefeed.sink_backpressure_nanos
* changefeed.stage.frontier_persistence.latency
* changefeed.stage.pts.manage.latency
* changefeed.stage.pts.manage_error.latency
* changefeed.stage.pts.create.latency

Epic: None
Release note: None

154535: lease: remove compatibility with pre-24.2 lease table r=rafiss a=rafiss

Epic: None
Release note: None

154540: backupsink: fix OOMing in TestFileSSTSinkWrite r=jeffswenson a=jeffswenson

Previously, the test allocated many 20 MiB+ buffers, which sometimes OOM'd the test runner. Now the buffers allocated by the test are ~10 KiB.

The large buffers were needed mainly because flushes are triggered by the compressed SST size: the test generated long runs of zero bytes, which compressed to almost nothing, so correctly sized buffers would not trigger the required flushes.

Release note: none

Informs: #154360
Informs: #153662
Informs: #153162

Co-authored-by: Radu Berinde <[email protected]>
Co-authored-by: Andy Yang <[email protected]>
Co-authored-by: Rafi Shamim <[email protected]>
Co-authored-by: Jeff Swenson <[email protected]>
5 parents (4e4d595 + 8e24f7a + 0e9f886 + d6a3f54 + b2ccb75), commit b9f4a61
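
For reference, the call sites updated in this commit replace a yaml.v3 decoder configured with `KnownFields(true)`, so the new `pkg/util/yamlutil` helpers plausibly look something like the sketch below. This is an illustration based on the commit message and the updated tests, not the package's actual source; details such as error wrapping or empty-input handling may differ.

// Package yamlutil: a minimal sketch of the helpers described in PR 154025,
// assuming they are thin wrappers over gopkg.in/yaml.v3.
package yamlutil

import (
	"bytes"

	"gopkg.in/yaml.v3"
)

// UnmarshalStrict decodes data into out and rejects unknown fields, mirroring
// yaml.v2's UnmarshalStrict by enabling KnownFields on a yaml.v3 decoder.
func UnmarshalStrict(data []byte, out interface{}) error {
	dec := yaml.NewDecoder(bytes.NewReader(data))
	dec.KnownFields(true)
	return dec.Decode(out)
}

// Marshal encodes in as YAML using a two-space indent instead of yaml.v3's
// four-space default.
func Marshal(in interface{}) ([]byte, error) {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2)
	if err := enc.Encode(in); err != nil {
		return nil, err
	}
	if err := enc.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

The test diffs below (cluster_settings_test.go and codec/types_test.go) show the intended usage: a three-line strict-decoder setup collapses into a single `yamlutil.UnmarshalStrict(data, &out)` call.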

File tree

17 files changed: +204 -92 lines

pkg/BUILD.bazel

Lines changed: 3 additions & 0 deletions
@@ -840,6 +840,7 @@ ALL_TESTS = [
     "//pkg/util/uuid:uuid_test",
     "//pkg/util/vector:vector_test",
     "//pkg/util/vfsutil:vfsutil_test",
+    "//pkg/util/yamlutil:yamlutil_test",
     "//pkg/util:util_test",
     "//pkg/workload/bank:bank_test",
     "//pkg/workload/cli:cli_test",
@@ -2851,6 +2852,8 @@ GO_TARGETS = [
     "//pkg/util/vector:vector_test",
     "//pkg/util/vfsutil:vfsutil",
     "//pkg/util/vfsutil:vfsutil_test",
+    "//pkg/util/yamlutil:yamlutil",
+    "//pkg/util/yamlutil:yamlutil_test",
     "//pkg/util:util",
     "//pkg/util:util_test",
     "//pkg/workload/bank:bank",

pkg/backup/backupsink/file_sst_sink_test.go

Lines changed: 28 additions & 16 deletions
@@ -8,6 +8,7 @@ package backupsink
 import (
 	"context"
 	"fmt"
+	"math/rand"
 	"reflect"
 	"strconv"
 	"strings"
@@ -122,6 +123,15 @@ func TestFileSSTSinkExtendOneFile(t *testing.T) {
 	require.Equal(t, 1, len(progDetails.Files))
 }
 
+func randomValue(n int64) []byte {
+	// Create random data so that it does not compress well.
+	b := make([]byte, n)
+	for i := range b {
+		b[i] = byte(rand.Int())
+	}
+	return b
+}
+
 // TestFileSSTSinkWrite tests the contents of flushed files and the internal
 // unflushed files of the FileSSTSink under different write scenarios. Each test
 // writes a sequence of exportedSpans into a FileSSTSink. The test then verifies
@@ -133,6 +143,12 @@ func TestFileSSTSinkWrite(t *testing.T) {
 	defer log.Scope(t).Close(t)
 
 	ctx := context.Background()
+	testTargetFileSize := int64(10 << 10)
+
+	// Override the fileSpanByteLimit so we can test going over the limit without
+	// needing large buffers that may oom the test node.
+	defer func(oldLimit int64) { fileSpanByteLimit = oldLimit }(fileSpanByteLimit)
+	fileSpanByteLimit = testTargetFileSize / 2
 
 	type testCase struct {
 		name string
@@ -145,8 +161,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
 		//
 		// TODO (msbutler): we currently don't test expected error handling. If this
 		// is non-empty, we just skip the test.
-		errorExplanation  string
-		noSSTSizeOverride bool
+		errorExplanation string
 	}
 
 	for _, tt := range []testCase{{name: "out-of-order-key-boundary",
@@ -278,7 +293,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
 		{
 			name: "size-flush",
 			exportSpans: []ExportedSpan{
-				newExportedSpanBuilder("a", "c").withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 20<<20)}, {key: "b", timestamp: 10}}).build(),
+				newExportedSpanBuilder("a", "c").withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(testTargetFileSize)}, {key: "b", timestamp: 10}}).build(),
				newExportedSpanBuilder("d", "f").withKVs([]kvAndTS{{key: "d", timestamp: 10}, {key: "e", timestamp: 10}}).build(),
			},
			flushedSpans: []roachpb.Spans{
@@ -292,7 +307,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
			// No flush can occur between two versions of the same key. Further, we must combine flushes which split a row.
			name: "no-size-flush-if-mid-mvcc",
			exportSpans: []ExportedSpan{
-				newRawExportedSpanBuilder(s2k0("a"), s2k0("c"), s2k0("c")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 20<<20)}, {key: "c", timestamp: 10}}).build(),
+				newRawExportedSpanBuilder(s2k0("a"), s2k0("c"), s2k0("c")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(testTargetFileSize)}, {key: "c", timestamp: 10}}).build(),
				newRawExportedSpanBuilder(s2k0("c"), s2k0("f"), s2k0("f")).withKVs([]kvAndTS{{key: "c", timestamp: 8}, {key: "f", timestamp: 10}}).build(),
			},
			flushedSpans: []roachpb.Spans{},
@@ -305,9 +320,9 @@ func TestFileSSTSinkWrite(t *testing.T) {
			name: "no-size-flush-mid-col-family",
			exportSpans: []ExportedSpan{
				newRawExportedSpanBuilder(s2kWithColFamily("c", 0), s2kWithColFamily("c", 1), s2kWithColFamily("c", 1)).withKVs([]kvAndTS{
-					{key: "c", timestamp: 10, value: make([]byte, 20<<20)}}).build(),
+					{key: "c", timestamp: 10, value: randomValue(testTargetFileSize)}}).build(),
				newRawExportedSpanBuilder(s2kWithColFamily("c", 1), s2kWithColFamily("c", 2), s2kWithColFamily("c", 2)).withKVs([]kvAndTS{
-					{key: "c", timestamp: 10, value: make([]byte, 20<<20)}}).buildWithEncoding(func(stingedKey string) roachpb.Key { return s2kWithColFamily(stingedKey, 1) }),
+					{key: "c", timestamp: 10, value: randomValue(testTargetFileSize)}}).buildWithEncoding(func(stingedKey string) roachpb.Key { return s2kWithColFamily(stingedKey, 1) }),
			},
			flushedSpans: []roachpb.Spans{},
			unflushedSpans: []roachpb.Spans{
@@ -318,7 +333,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
			// It's safe to flush at the range boundary.
			name: "size-flush-at-range-boundary",
			exportSpans: []ExportedSpan{
-				newRawExportedSpanBuilder(s2k("a"), s2k("d"), s2k("d")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 20<<20)}, {key: "c", timestamp: 10}}).build(),
+				newRawExportedSpanBuilder(s2k("a"), s2k("d"), s2k("d")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(testTargetFileSize)}, {key: "c", timestamp: 10}}).build(),
			},
			flushedSpans: []roachpb.Spans{
				{{Key: s2k("a"), EndKey: s2k("d")}},
@@ -332,7 +347,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
			// row between two column families.
			name: "trim-resume-key",
			exportSpans: []ExportedSpan{
-				newRawExportedSpanBuilder(s2k0("a"), s2k0("c"), s2k("c")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 20<<20)}}).build(),
+				newRawExportedSpanBuilder(s2k0("a"), s2k0("c"), s2k("c")).withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(testTargetFileSize)}}).build(),
			},
			flushedSpans: []roachpb.Spans{
				{{Key: s2k0("a"), EndKey: s2k("c")}},
@@ -344,24 +359,23 @@ func TestFileSSTSinkWrite(t *testing.T) {
			// even if the next span's start key matches the file's end key.
			name: "file-size-cut",
			exportSpans: []ExportedSpan{
-				newExportedSpanBuilder("a", "c").withKVs([]kvAndTS{{key: "a", timestamp: 10, value: make([]byte, 64<<20)}, {key: "b", timestamp: 10}}).build(),
+				newExportedSpanBuilder("a", "c").withKVs([]kvAndTS{{key: "a", timestamp: 10, value: randomValue(fileSpanByteLimit)}, {key: "b", timestamp: 10}}).build(),
				newExportedSpanBuilder("c", "f").withKVs([]kvAndTS{{key: "c", timestamp: 10}, {key: "e", timestamp: 10}}).build(),
			},
			flushedSpans: []roachpb.Spans{},
			unflushedSpans: []roachpb.Spans{
				{{Key: s2k0("a"), EndKey: s2k0("c")}, {Key: s2k0("c"), EndKey: s2k0("f")}},
			},
-			noSSTSizeOverride: true,
		},
		{
			// No file cut can occur between the two column families of the same row,
			// even if the file is sufficiently large to get cut.
			name: "no-file-cut-mid-col-family",
			exportSpans: []ExportedSpan{
				newRawExportedSpanBuilder(s2kWithColFamily("c", 0), s2kWithColFamily("c", 1), s2kWithColFamily("c", 1)).withKVs([]kvAndTS{
-					{key: "c", timestamp: 10, value: make([]byte, 65<<20)}}).build(),
+					{key: "c", timestamp: 10, value: randomValue(testTargetFileSize)}}).build(),
				newRawExportedSpanBuilder(s2kWithColFamily("c", 1), s2kWithColFamily("c", 2), s2kWithColFamily("c", 2)).withKVs([]kvAndTS{
-					{key: "c", timestamp: 10, value: make([]byte, 20<<20)}}).buildWithEncoding(func(stingedKey string) roachpb.Key { return s2kWithColFamily(stingedKey, 1) }),
+					{key: "c", timestamp: 10, value: randomValue(testTargetFileSize / 2)}}).buildWithEncoding(func(stingedKey string) roachpb.Key { return s2kWithColFamily(stingedKey, 1) }),
			},
			flushedSpans: []roachpb.Spans{},
			unflushedSpans: []roachpb.Spans{
@@ -377,9 +391,7 @@ func TestFileSSTSinkWrite(t *testing.T) {
				return
			}
			st := cluster.MakeTestingClusterSettings()
-			if !tt.noSSTSizeOverride {
-				targetFileSize.Override(ctx, &st.SV, 10<<10)
-			}
+			targetFileSize.Override(ctx, &st.SV, testTargetFileSize)

			sink, store := fileSSTSinkTestSetup(t, st, elide)
			defer func() {
@@ -534,7 +546,7 @@ func TestFileSSTSinkStats(t *testing.T) {
			sinkStats{hlc.Timestamp{WallTime: 10}, 3, 3, 0, 0, 0, 1}},
		{
			// Write an exported span that comes after all spans so far. This span has enough data for a size flush.
-			newExportedSpanBuilder("g", "h").withKVs([]kvAndTS{{key: "g", timestamp: 10, value: make([]byte, 20<<20)}}).build(),
+			newExportedSpanBuilder("g", "h").withKVs([]kvAndTS{{key: "g", timestamp: 10, value: randomValue(10 << 10)}}).build(),
			sinkStats{hlc.Timestamp{WallTime: 0}, 0, 4, 1, 0, 1, 1}},
		{
			// Write the first exported span after the flush.
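
The compression argument from the 154540 commit message can be made concrete with a small, standalone sketch (not part of this commit). It uses gzip purely as a stand-in for the sink's actual SST block compression, so the exact numbers are illustrative only:

// A minimal sketch showing why all-zero buffers defeated the compressed-size
// flush threshold while small random buffers do not.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"math/rand"
)

// compressedSize is a hypothetical helper for this illustration.
func compressedSize(data []byte) int {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	_, _ = w.Write(data)
	_ = w.Close()
	return buf.Len()
}

func main() {
	zeros := make([]byte, 20<<20) // old test buffers: 20 MiB of zeros
	random := make([]byte, 10<<10)
	_, _ = rand.Read(random) // new test buffers: ~10 KiB of incompressible data

	fmt.Println("zeros compressed to:", compressedSize(zeros), "bytes")
	fmt.Println("random compressed to:", compressedSize(random), "bytes")
}

An all-zero 20 MiB buffer collapses to a few KiB once compressed, so only very large values crossed the compressed-size flush threshold; a small random buffer stays roughly its allocated size, which is why the test can now use ~10 KiB values.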

pkg/roachprod/install/BUILD.bazel

Lines changed: 1 addition & 0 deletions
@@ -85,6 +85,7 @@ go_test(
         "//pkg/util/randutil",
         "//pkg/util/retry",
         "//pkg/util/syncutil",
+        "//pkg/util/yamlutil",
         "@com_github_cockroachdb_datadriven//:datadriven",
         "@com_github_cockroachdb_errors//:errors",
         "@com_github_stretchr_testify//require",

pkg/roachprod/install/cluster_settings_test.go

Lines changed: 2 additions & 5 deletions
@@ -6,9 +6,9 @@
 package install
 
 import (
-	"bytes"
 	"testing"
 
+	"github.com/cockroachdb/cockroach/pkg/util/yamlutil"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v3"
 )
@@ -22,10 +22,7 @@ func TestClusterSettingOptionListCodec(t *testing.T) {
 	require.NoError(t, err)
 
 	var decOpts ClusterSettingOptionList
-	dec := yaml.NewDecoder(bytes.NewReader(data))
-	dec.KnownFields(true)
-	err = dec.Decode(&decOpts)
-	require.NoError(t, err)
+	require.NoError(t, yamlutil.UnmarshalStrict(data, &decOpts))
 
 	require.Equal(t, opts, decOpts)
 	require.Equal(t, MakeClusterSettings(opts...), MakeClusterSettings(decOpts...))

pkg/roachprod/opentelemetry/cockroachdb_metrics.go

Lines changed: 22 additions & 0 deletions
@@ -405,6 +405,8 @@ var cockroachdbMetrics = map[string]string{
 	"changefeed_parallel_io_result_queue_nanos_bucket": "changefeed.parallel_io_result_queue_nanos.bucket",
 	"changefeed_parallel_io_result_queue_nanos_count": "changefeed.parallel_io_result_queue_nanos.count",
 	"changefeed_parallel_io_result_queue_nanos_sum": "changefeed.parallel_io_result_queue_nanos.sum",
+	"changefeed_progress_skew_span": "changefeed.progress_skew.span",
+	"changefeed_progress_skew_table": "changefeed.progress_skew.table",
 	"changefeed_queue_time_nanos": "changefeed.queue_time",
 	"changefeed_running": "changefeed.running",
 	"changefeed_schema_registry_registrations": "changefeed.schema_registry.registrations",
@@ -415,6 +417,10 @@ var cockroachdbMetrics = map[string]string{
 	"changefeed_sink_batch_hist_nanos_bucket": "changefeed.sink_batch_hist_nanos.bucket",
 	"changefeed_sink_batch_hist_nanos_count": "changefeed.sink_batch_hist_nanos.count",
 	"changefeed_sink_batch_hist_nanos_sum": "changefeed.sink_batch_hist_nanos.sum",
+	"changefeed_sink_backpressure_nanos": "changefeed.sink_backpressure_nanos",
+	"changefeed_sink_backpressure_nanos_bucket": "changefeed.sink_backpressure_nanos.bucket",
+	"changefeed_sink_backpressure_nanos_count": "changefeed.sink_backpressure_nanos.count",
+	"changefeed_sink_backpressure_nanos_sum": "changefeed.sink_backpressure_nanos.sum",
 	"changefeed_sink_io_inflight": "changefeed.sink_io_inflight",
 	"changefeed_size_based_flushes": "changefeed.size_based_flushes",
 	"changefeed_stage_checkpoint_job_progress_latency": "changefeed.stage.checkpoint_job_progress.latency",
@@ -433,6 +439,10 @@ var cockroachdbMetrics = map[string]string{
 	"changefeed_stage_encode_latency_bucket": "changefeed.stage.encode.latency.bucket",
 	"changefeed_stage_encode_latency_count": "changefeed.stage.encode.latency.count",
 	"changefeed_stage_encode_latency_sum": "changefeed.stage.encode.latency.sum",
+	"changefeed_stage_frontier_persistence_latency": "changefeed.stage.frontier_persistence.latency",
+	"changefeed_stage_frontier_persistence_latency_bucket": "changefeed.stage.frontier_persistence.latency.bucket",
+	"changefeed_stage_frontier_persistence_latency_count": "changefeed.stage.frontier_persistence.latency.count",
+	"changefeed_stage_frontier_persistence_latency_sum": "changefeed.stage.frontier_persistence.latency.sum",
 	"changefeed_stage_kv_feed_buffer_latency": "changefeed.stage.kv_feed_buffer.latency",
 	"changefeed_stage_kv_feed_buffer_latency_bucket": "changefeed.stage.kv_feed_buffer.latency.bucket",
 	"changefeed_stage_kv_feed_buffer_latency_count": "changefeed.stage.kv_feed_buffer.latency.count",
@@ -441,6 +451,18 @@ var cockroachdbMetrics = map[string]string{
 	"changefeed_stage_kv_feed_wait_for_table_event_latency_bucket": "changefeed.stage.kv_feed_wait_for_table_event.latency.bucket",
 	"changefeed_stage_kv_feed_wait_for_table_event_latency_count": "changefeed.stage.kv_feed_wait_for_table_event.latency.count",
 	"changefeed_stage_kv_feed_wait_for_table_event_latency_sum": "changefeed.stage.kv_feed_wait_for_table_event.latency.sum",
+	"changefeed_stage_pts_create_latency": "changefeed.stage.pts.create.latency",
+	"changefeed_stage_pts_create_latency_bucket": "changefeed.stage.pts.create.latency.bucket",
+	"changefeed_stage_pts_create_latency_count": "changefeed.stage.pts.create.latency.count",
+	"changefeed_stage_pts_create_latency_sum": "changefeed.stage.pts.create.latency.sum",
+	"changefeed_stage_pts_manage_error_latency": "changefeed.stage.pts.manage_error.latency",
+	"changefeed_stage_pts_manage_error_latency_bucket": "changefeed.stage.pts.manage_error.latency.bucket",
+	"changefeed_stage_pts_manage_error_latency_count": "changefeed.stage.pts.manage_error.latency.count",
+	"changefeed_stage_pts_manage_error_latency_sum": "changefeed.stage.pts.manage_error.latency.sum",
+	"changefeed_stage_pts_manage_latency": "changefeed.stage.pts.manage.latency",
+	"changefeed_stage_pts_manage_latency_bucket": "changefeed.stage.pts.manage.latency.bucket",
+	"changefeed_stage_pts_manage_latency_count": "changefeed.stage.pts.manage.latency.count",
+	"changefeed_stage_pts_manage_latency_sum": "changefeed.stage.pts.manage.latency.sum",
 	"changefeed_stage_rangefeed_buffer_checkpoint_latency": "changefeed.stage.rangefeed_buffer_checkpoint.latency",
 	"changefeed_stage_rangefeed_buffer_checkpoint_latency_bucket": "changefeed.stage.rangefeed_buffer_checkpoint.latency.bucket",
 	"changefeed_stage_rangefeed_buffer_checkpoint_latency_count": "changefeed.stage.rangefeed_buffer_checkpoint.latency.count",

pkg/roachprod/roachprodutil/codec/BUILD.bazel

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@ go_test(
     ],
     embed = [":codec"],
     deps = [
+        "//pkg/util/yamlutil",
         "@com_github_stretchr_testify//require",
         "@in_gopkg_yaml_v3//:yaml_v3",
     ],

pkg/roachprod/roachprodutil/codec/types_test.go

Lines changed: 2 additions & 5 deletions
@@ -6,10 +6,10 @@
 package codec
 
 import (
-	"bytes"
 	"reflect"
 	"testing"
 
+	"github.com/cockroachdb/cockroach/pkg/util/yamlutil"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v3"
 )
@@ -60,10 +60,7 @@ func TestDynamicTypes(t *testing.T) {
 	require.NoError(t, err)
 
 	var decoded ListWrapper[Animal]
-	dec := yaml.NewDecoder(bytes.NewReader(data))
-	dec.KnownFields(true)
-	err = dec.Decode(&decoded)
-	require.NoError(t, err)
+	require.NoError(t, yamlutil.UnmarshalStrict(data, &decoded))
 
 	// Verify equality of the decoded list and the original list.
 	require.Equal(t, animals, decoded.Get())

pkg/sql/catalog/lease/count.go

Lines changed: 2 additions & 29 deletions
@@ -94,7 +94,6 @@
 ) (countDetail, error) {
 	var whereClause []string
 	forceMultiRegionQuery := false
-	useBytesOnRetry := false
 	for _, t := range versions {
 		versionClause := ""
 		if !forAnyVersion {
@@ -120,19 +119,13 @@
 	// entire table.
 	if (cachedDatabaseRegions != nil && cachedDatabaseRegions.IsMultiRegion()) ||
 		forceMultiRegionQuery {
-		// If we are injecting a raw leases descriptors, that will not have the enum
-		// type set, so convert the region to byte equivalent physical representation.
-		detail, err = countLeasesByRegion(ctx, txn, prober, regionMap, cachedDatabaseRegions,
-			useBytesOnRetry, at, whereClause)
+		detail, err = countLeasesByRegion(ctx, txn, prober, regionMap, at, whereClause)
 	} else {
 		detail, err = countLeasesNonMultiRegion(ctx, txn, at, whereClause)
 	}
 	// If any transient region column errors occur then we should retry the count query.
 	if isTransientRegionColumnError(err) {
 		forceMultiRegionQuery = true
-		// If the query was already multi-region aware, then the system database is MR,
-		// but our lease descriptor has not been upgraded yet.
-		useBytesOnRetry = cachedDatabaseRegions != nil && cachedDatabaseRegions.IsMultiRegion()
 		return txn.KV().GenerateForcedRetryableErr(ctx, "forcing retry once with MR columns")
 	}
 
@@ -187,15 +180,10 @@
 	txn isql.Txn,
 	prober regionliveness.Prober,
 	regionMap regionliveness.LiveRegions,
-	cachedDBRegions regionliveness.CachedDatabaseRegions,
-	convertRegionsToBytes bool,
 	at hlc.Timestamp,
 	whereClauses []string,
 ) (countDetail, error) {
 	regionClause := "crdb_region=$2::system.crdb_internal_region"
-	if convertRegionsToBytes {
-		regionClause = "crdb_region=$2"
-	}
 	stmt := fmt.Sprintf(
 		`SELECT %[1]s FROM system.public.lease AS OF SYSTEM TIME '%[2]s' WHERE %[3]s `,
 		getCountLeaseColumns(),
@@ -204,28 +192,13 @@
 	)
 	var detail countDetail
 	if err := regionMap.ForEach(func(region string) error {
-		regionEnumValue := region
-		// The leases table descriptor injected does not have the type of the column
-		// set to the region enum type. So, instead convert the logical value to
-		// the physical one for comparison.
-		// TODO(fqazi): In 24.2 when this table format is default we can stop using
-		// synthetic descriptors and use the first code path.
-		if convertRegionsToBytes {
-			regionTypeDesc := cachedDBRegions.GetRegionEnumTypeDesc().AsRegionEnumTypeDescriptor()
-			for i := 0; i < regionTypeDesc.NumEnumMembers(); i++ {
-				if regionTypeDesc.GetMemberLogicalRepresentation(i) == region {
-					regionEnumValue = string(regionTypeDesc.GetMemberPhysicalRepresentation(i))
-					break
-				}
-			}
-		}
 		var values tree.Datums
 		queryRegionRows := func(countCtx context.Context) error {
 			var err error
 			values, err = txn.QueryRowEx(
 				countCtx, "count-leases", txn.KV(),
 				sessiondata.NodeUserSessionDataOverride,
-				stmt, at.GoTime(), regionEnumValue,
+				stmt, at.GoTime(), region,
 			)
 			return err
 		}
