
Commit 1c93e8a

craig[bot], tbg, and rafiss committed
143270: kvserver: better obs in TestTxnReadWithinUncertaintyIntervalAfterRangeMerge r=tbg a=tbg

Closes #143260.

This is a complex test with a rare failure mode. Trace all of the relevant operations so that we can meaningfully engage with it.

Epic: none
Release note: none

143451: sql,backfill: avoid holding a protected timestamp during the backfill r=rafiss a=rafiss

### sql,rowexec: use WriteTS as timestamp for AddSSTable in backfill

PR #64023 made it so we use the backfill's read TS as the batch TS for the AddSSTable operation. Simultaneously with that change, #63945 was being worked on, which made it so that we use a fixed _write_ timestamp for the keys that are backfilled. This write timestamp is the actual minimum lower-bound timestamp for the backfill operation, and the read timestamp is allowed to change, so it makes more sense to use the write timestamp for the AddSSTable operation. Looking at the diffs in the PRs makes it seem pretty likely that this was just missed as part of a trivial branch skew.

This commit does not cause any behavior change, since at the time of writing, the read timestamp and write timestamp are always identical for the backfill.

Release note: None

### sql,backfill: choose new read timestamp when backfill job is resumed

When #73861 got merged, we seem to have accidentally made it so the index backfiller will never select a new read timestamp. That is contrary to the goals of #63945, where we initially added the ability to advance the read timestamp forward. This mistake is why we were seeing behavior where a GC threshold error would cause the backfill to retry infinitely (which in turn led us to treat it as a permanent error in #139203, which was something we never should have done). We never noticed the bug because when CREATE INDEX was added to the declarative schema changer in #92128, we turned off the declarative schema changer in the test that would have caught this (i.e. TestIndexBackfillAfterGC).

This commit makes it so we choose a current timestamp as the read timestamp when planning the backfill, and fixes up that test to show that this works for index backfills in the declarative schema changer.

Release note (bug fix): Fixed a bug where a GC threshold error (which appears as "batch timestamp must be after replica GC threshold ...") could cause a schema change that backfills data to fail. Now, the error will cause the backfill to be retried at a higher timestamp to avoid the error.

### sql, backfill: use a current timestamp to scan each chunk of the source index

There's no need for the backfill to use a fixed read timestamp for the entire job. Instead, each chunk of the original index that needs to be scanned can use a current timestamp. This allows us to remove all the logic for adding protected timestamps for the backfill.

Release note (performance improvement): Schema changes that require data to be backfilled no longer hold a protected timestamp for the entire duration of the backfill, which means there is less overhead caused by MVCC garbage collection after the backfill completes.

### sql: stop setting readAsOf in index backfiller

As of the parent commit, this is unused by the index backfiller.

Release note: None

---

fixes #140629
informs #142339
informs #142117
informs #141773

Co-authored-by: Tobias Grieger <[email protected]>
Co-authored-by: Rafi Shamim <[email protected]>
3 parents 0952817 + ba89df8 + bae95e3 commit 1c93e8a
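The "retry at a higher timestamp" behavior described in the bug-fix release note can be illustrated with a small, self-contained sketch. None of this is CockroachDB code: `errGCThreshold`, `scanChunk`, and the plain `time.Time` timestamps below are hypothetical stand-ins for the GC threshold error and the per-chunk read described above.

```go
package main

import (
    "errors"
    "fmt"
    "time"
)

// errGCThreshold stands in for the "batch timestamp must be after replica GC
// threshold" error class from the release note above.
var errGCThreshold = errors.New("batch timestamp must be after replica GC threshold")

// scanChunk is a hypothetical stand-in for scanning one chunk of the source
// index at a given read timestamp; it fails once GC has advanced past that
// timestamp.
func scanChunk(readTS, gcThreshold time.Time) error {
    if !readTS.After(gcThreshold) {
        return errGCThreshold
    }
    return nil
}

func main() {
    fixedReadTS := time.Now().Add(-time.Hour)   // read timestamp chosen when the job started
    gcThreshold := time.Now().Add(-time.Minute) // GC has since advanced past it

    // Old behavior (simplified): the job retries the same fixed read timestamp,
    // so the same error comes back on every attempt.
    if err := scanChunk(fixedReadTS, gcThreshold); err != nil {
        fmt.Println("fixed read timestamp:", err)
    }

    // New behavior (simplified): a GC threshold error is handled by reading the
    // chunk again at a current timestamp, which sits above the threshold.
    if err := scanChunk(fixedReadTS, gcThreshold); errors.Is(err, errGCThreshold) {
        if retryErr := scanChunk(time.Now(), gcThreshold); retryErr == nil {
            fmt.Println("retry at a current timestamp succeeded")
        }
    }
}
```

The point of the change is visible in the second branch: because each chunk can pick a fresh read timestamp, a GC threshold error is transient rather than a permanent failure, and no protected timestamp is needed to pin the old one.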

File tree

11 files changed: +131 −152 lines

pkg/kv/kvpb/BUILD.bazel

Lines changed: 1 addition & 0 deletions
@@ -78,6 +78,7 @@ go_test(
         "//pkg/roachpb",
         "//pkg/storage/enginepb",
         "//pkg/testutils/echotest",
+        "//pkg/testutils/skip",
         "//pkg/util/buildutil",
         "//pkg/util/encoding",
         "//pkg/util/hlc",

pkg/kv/kvpb/batch_test.go

Lines changed: 2 additions & 0 deletions
@@ -14,6 +14,7 @@ import (
     "github.com/cockroachdb/cockroach/pkg/kv/kvserver/concurrency/lock"
     "github.com/cockroachdb/cockroach/pkg/roachpb"
     "github.com/cockroachdb/cockroach/pkg/storage/enginepb"
+    "github.com/cockroachdb/cockroach/pkg/testutils/skip"
     "github.com/cockroachdb/cockroach/pkg/util/hlc"
     "github.com/kr/pretty"
     "github.com/stretchr/testify/require"
@@ -554,6 +555,7 @@ func TestRefreshSpanIterateSkipLocked(t *testing.T) {
 }

 func TestResponseKeyIterate(t *testing.T) {
+    skip.UnderNonTestBuild(t) // some assertions that are checked are only returned in test builds
     keyA, keyB := roachpb.Key("a"), roachpb.Key("b")
     keyC, keyD := roachpb.Key("c"), roachpb.Key("d")

pkg/kv/kvserver/client_replica_test.go

Lines changed: 51 additions & 11 deletions
@@ -1011,21 +1011,52 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) {
         // writing.
         manuals[2].Increment(2000)

+        defer func() {
+            if !t.Failed() {
+                return
+            }
+            t.Logf("maxNanos=%d", maxNanos)
+            t.Logf("manuals[2]=%d", manuals[2].UnixNano())
+        }()
+
         // Write the data from a different transaction to establish the time for the
         // key as 10 ns in the future.
-        _, pErr := kv.SendWrapped(ctx, tc.Servers[2].DistSenderI().(kv.Sender), putArgs(keyC, []byte("value")))
-        require.Nil(t, pErr)
+        {
+            ctx, fagrs := tracing.ContextWithRecordingSpan(ctx, tc.Servers[0].Tracer(), "keyC-write")
+            resp, pErr := kv.SendWrapped(ctx, tc.Servers[2].DistSenderI().(kv.Sender), putArgs(keyC, []byte("value")))
+            rec := fagrs()
+            require.Nil(t, pErr, "%v", rec)
+            defer func() {
+                if !t.Failed() {
+                    return
+                }
+                t.Logf("keyC-write: %+v", resp)
+                t.Logf("keyC-write: %s", rec)
+            }()
+        }

         // Create two identical transactions. The first one will perform a read to a
         // store before the merge, the second will only read after the merge.
         txn := roachpb.MakeTransaction("txn1", keyA, isolation.Serializable, 1, now, maxOffset, instanceId, 0, false /* omitInRangefeeds */)
         txn2 := roachpb.MakeTransaction("txn2", keyA, isolation.Serializable, 1, now, maxOffset, instanceId, 0, false /* omitInRangefeeds */)

-        // Simulate a read which will cause the observed time to be set to now
-        resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyA))
-        require.Nil(t, pErr)
-        // The client needs to update its transaction to the returned transaction which has observed timestamps in it
-        txn = *resp.Header().Txn
+        // Simulate a read which will cause the observed time to be set to now.
+        {
+            ctx, fagrs := tracing.ContextWithRecordingSpan(ctx, tc.Servers[0].Tracer(), "txn1-keyA-get")
+            resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyA))
+            rec := fagrs()
+            require.Nil(t, pErr, "%v", rec)
+            // The client needs to update its transaction to the returned transaction which has observed timestamps in it
+            txn = *resp.Header().Txn
+            defer func() {
+                if !t.Failed() {
+                    return
+                }
+                t.Logf("txn1-keyA-get: %+v", resp)
+                t.Logf("txn1-keyA-get: %s", rec)
+                t.Logf("txn1-keyA-get resp txn: %s", txn)
+            }()
+        }

         // Now move the ranges, being careful not to move either leaseholder
         // C: 3 (RHS - replica) => 0 (LHS leaseholder)
@@ -1048,8 +1079,13 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) {

         // Try and read the transaction from the context of a new transaction. This
         // will fail as expected as the observed timestamp will not be set.
-        _, pErr = kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn2}, getArgs(keyC))
-        require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail())
+        {
+            ctx, fagrs := tracing.ContextWithRecordingSpan(ctx, tc.Servers[0].Tracer(), "txn2get-should-rwue")
+            _, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn2},
+                getArgs(keyC))
+            rec := fagrs()
+            require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail(), "%s", rec)
+        }

         // Try and read the key from the existing transaction. This should fail the
         // same way.
@@ -1058,8 +1094,12 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) {
         // - Other error (Bad) - We expect an uncertainty error so the client can choose a new timestamp and retry.
         // - Not found (Bad) - Error because the data was written before us.
         // - Found (Bad) - The write HLC timestamp is after our timestamp.
-        _, pErr = kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyC))
-        require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail())
+        {
+            ctx, fagrs := tracing.ContextWithRecordingSpan(ctx, tc.Servers[0].Tracer(), "txn1get-should-rwue")
+            _, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyC))
+            rec := fagrs()
+            require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail(), "%s", rec)
+        }
     }

     testutils.RunTrueAndFalse(t, "alignLeaseholders", func(t *testing.T, alignLeaseholders bool) {
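The pattern repeated for each request above is worth calling out: run the request under a recording span, attach the recording to the assertion message, and defer logging of the recording so it only shows up when the test has failed. Below is a minimal sketch of that shape using only the standard `testing` package; `doRequest` and its string recording are hypothetical stand-ins for the traced KV calls and `tracing.ContextWithRecordingSpan`.

```go
package example

import "testing"

// doRequest is a hypothetical stand-in for one of the traced KV requests in
// the test above; it returns its recorded diagnostics alongside the error.
func doRequest() (rec string, err error) {
    return "trace recording would go here", nil
}

func TestTracedStepsSketch(t *testing.T) {
    {
        rec, err := doRequest()
        // Attach the recording to the failure message so an immediate failure
        // is self-describing.
        if err != nil {
            t.Fatalf("request failed: %v\nrecording: %s", err, rec)
        }
        // Only dump the recording if a later assertion fails; passing runs stay quiet.
        defer func() {
            if !t.Failed() {
                return
            }
            t.Logf("request recording: %s", rec)
        }()
    }
    // ... further requests and assertions; any failure triggers the deferred logging.
}
```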

pkg/sql/backfill.go

Lines changed: 1 addition & 3 deletions
@@ -1012,8 +1012,6 @@ func (sc *SchemaChanger) distIndexBackfill(
         log.Infof(ctx, "writing at persisted safe write time %v...", writeAsOf)
     }

-    readAsOf := sc.clock.Now()
-
     var p *PhysicalPlan
     var extEvalCtx extendedEvalContext
     var planCtx *PlanningCtx
@@ -1045,7 +1043,7 @@
     )
     indexBatchSize := indexBackfillBatchSize.Get(&sc.execCfg.Settings.SV)
     chunkSize := sc.getChunkSize(indexBatchSize)
-    spec, err := initIndexBackfillerSpec(*tableDesc.TableDesc(), writeAsOf, readAsOf, writeAtRequestTimestamp, chunkSize, addedIndexes, 0 /* sourceIndexID*/)
+    spec, err := initIndexBackfillerSpec(*tableDesc.TableDesc(), writeAsOf, writeAtRequestTimestamp, chunkSize, addedIndexes, 0)
     if err != nil {
         return err
     }
pkg/sql/backfill_protected_timestamp_test.go

Lines changed: 17 additions & 67 deletions
@@ -24,8 +24,6 @@ import (
     "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigptsreader"
     "github.com/cockroachdb/cockroach/pkg/sql"
     "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
-    "github.com/cockroachdb/cockroach/pkg/sql/execinfra"
-    "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scexec"
     "github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
     "github.com/cockroachdb/cockroach/pkg/testutils"
     "github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
@@ -118,7 +116,7 @@ func TestValidationWithProtectedTS(t *testing.T) {
     }
     for _, sql := range []string{
         "SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false",
-        "ALTER DATABASE defaultdb CONFIGURE ZONE USING gc.ttlseconds = 1",
+        "ALTER DATABASE defaultdb CONFIGURE ZONE USING gc.ttlseconds = 3",
         "CREATE TABLE t(n int)",
         "ALTER TABLE t CONFIGURE ZONE USING range_min_bytes = 0, range_max_bytes = 67108864, gc.ttlseconds = 1",
         "INSERT INTO t(n) SELECT * FROM generate_series(1, 250000)",
@@ -223,10 +221,10 @@ func TestValidationWithProtectedTS(t *testing.T) {
     }
 }

-// TestBackfillWithProtectedTS runs operations that backfill into a table and
+// TestBackfillWithProtectedTS runs a query backfill into a table and
 // confirms that a protected timestamp is setup. It also confirms that if the
 // protected timestamp is not ready in time we do not infinitely retry.
-func TestBackfillWithProtectedTS(t *testing.T) {
+func TestBackfillQueryWithProtectedTS(t *testing.T) {
     defer leaktest.AfterTest(t)()
     defer log.Scope(t).Close(t)

@@ -239,7 +237,6 @@ func TestBackfillWithProtectedTS(t *testing.T) {
     backfillQueryResume := make(chan struct{})
     blockBackFillsForPTSFailure := atomic.Bool{}
     blockBackFillsForPTSCheck := atomic.Bool{}
-    usingDeclarativeSchemaChanger := atomic.Bool{}
     var s serverutils.TestServerInterface
     var db *gosql.DB
     var tableID uint32
@@ -248,46 +245,11 @@ func TestBackfillWithProtectedTS(t *testing.T) {
         SQLEvalContext: &eval.TestingKnobs{
             ForceProductionValues: true,
         },
-        SQLDeclarativeSchemaChanger: &scexec.TestingKnobs{
-            RunBeforeBackfill: func() error {
-                // Cause the backfill to pause before adding the protected
-                // timestamp. This knob is for testing schema changes that
-                // are on the declarative schema changer.
-                if blockBackFillsForPTSFailure.Load() && usingDeclarativeSchemaChanger.Load() {
-                    if !blockBackFillsForPTSFailure.Swap(false) {
-                        return nil
-                    }
-                    backfillQueryWait <- struct{}{}
-                    <-backfillQueryResume
-                }
-                return nil
-            },
-        },
-        DistSQL: &execinfra.TestingKnobs{
-            RunBeforeBackfillChunk: func(sp roachpb.Span) error {
-                // Cause the backfill to pause after it already began running
-                // and has installed a protected timestamp. This knob is for
-                // testing schema changes that use the index backfiller.
-                if blockBackFillsForPTSCheck.Load() && usingDeclarativeSchemaChanger.Load() {
-                    _, prefix, err := s.Codec().DecodeTablePrefix(sp.Key)
-                    if err != nil || prefix != tableID {
-                        //nolint:returnerrcheck
-                        return nil
-                    }
-                    if !blockBackFillsForPTSCheck.Swap(false) {
-                        return nil
-                    }
-                    backfillQueryWait <- struct{}{}
-                    <-backfillQueryResume
-                }
-                return nil
-            },
-        },
         SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{
             RunBeforeQueryBackfill: func() error {
                 // Cause the backfill to pause before adding the protected
                 // timestamp. This knob is for testing CREATE MATERIALIZED VIEW.
-                if blockBackFillsForPTSFailure.Load() && !usingDeclarativeSchemaChanger.Load() {
+                if blockBackFillsForPTSFailure.Load() {
                     if !blockBackFillsForPTSFailure.Swap(false) {
                         return nil
                     }
@@ -302,7 +264,7 @@ func TestBackfillWithProtectedTS(t *testing.T) {
                 // Detect the first scan on table from the backfill, which is
                 // after the PTS has been set up. This knob is for testing CREATE
                 // MATERIALIZED VIEW.
-                if blockBackFillsForPTSCheck.Load() && !usingDeclarativeSchemaChanger.Load() &&
+                if blockBackFillsForPTSCheck.Load() &&
                     request.Txn != nil &&
                     request.Txn.Name == "schemaChangerBackfill" &&
                     request.Requests[0].GetInner().Method() == kvpb.Scan {
@@ -380,35 +342,23 @@ func TestBackfillWithProtectedTS(t *testing.T) {
     const rowsAddedPerIteration = 1

     for _, tc := range []struct {
-        name                   string
-        tableName              string
-        backfillSchemaChange   string
-        jobDescriptionPrefix   string
-        postTestQuery          string
-        expectedCount          int
-        usingDeclSchemaChanger bool
+        name                 string
+        tableName            string
+        backfillSchemaChange string
+        jobDescriptionPrefix string
+        postTestQuery        string
+        expectedCount        int
     }{
         {
-            name:                   "create materialized view",
-            tableName:              "t_mat_view",
-            backfillSchemaChange:   "CREATE MATERIALIZED VIEW test AS (SELECT n from t_mat_view)",
-            jobDescriptionPrefix:   "CREATE MATERIALIZED VIEW",
-            postTestQuery:          "SELECT count(*) FROM test",
-            expectedCount:          initialRowCount - rowsDeletedPerIteration + rowsAddedPerIteration,
-            usingDeclSchemaChanger: false,
-        },
-        {
-            name:                   "create index",
-            tableName:              "t_idx",
-            backfillSchemaChange:   "CREATE INDEX idx ON t_idx(n)",
-            jobDescriptionPrefix:   "CREATE INDEX idx",
-            postTestQuery:          "SELECT count(*) FROM t_idx@idx",
-            expectedCount:          initialRowCount - 2*rowsDeletedPerIteration + 2*rowsAddedPerIteration,
-            usingDeclSchemaChanger: true,
+            name:                 "create materialized view",
+            tableName:            "t_mat_view",
+            backfillSchemaChange: "CREATE MATERIALIZED VIEW test AS (SELECT n from t_mat_view)",
+            jobDescriptionPrefix: "CREATE MATERIALIZED VIEW",
+            postTestQuery:        "SELECT count(*) FROM test",
+            expectedCount:        initialRowCount - rowsDeletedPerIteration + rowsAddedPerIteration,
         },
     } {
         t.Run(tc.name, func(t *testing.T) {
-            usingDeclarativeSchemaChanger.Store(tc.usingDeclSchemaChanger)
             for _, sql := range []string{
                 fmt.Sprintf("CREATE TABLE %s(n int primary key)", tc.tableName),
                 fmt.Sprintf("ALTER TABLE %s CONFIGURE ZONE USING range_min_bytes = 0, range_max_bytes = 67108864, gc.ttlseconds = 5", tc.tableName),

pkg/sql/distsql_plan_backfill.go

Lines changed: 1 addition & 2 deletions
@@ -40,7 +40,7 @@ func initColumnBackfillerSpec(

 func initIndexBackfillerSpec(
     desc descpb.TableDescriptor,
-    writeAsOf, readAsOf hlc.Timestamp,
+    writeAsOf hlc.Timestamp,
     writeAtBatchTimestamp bool,
     chunkSize int64,
     indexesToBackfill []descpb.IndexID,
@@ -50,7 +50,6 @@
         Table:                 desc,
         WriteAsOf:             writeAsOf,
         WriteAtBatchTimestamp: writeAtBatchTimestamp,
-        ReadAsOf:              readAsOf,
         Type:                  execinfrapb.BackfillerSpec_Index,
         ChunkSize:             chunkSize,
         IndexesToBackfill:     indexesToBackfill,

pkg/sql/execinfrapb/processors_bulk_io.proto

Lines changed: 1 addition & 1 deletion
@@ -61,9 +61,9 @@ message BackfillerSpec {
   optional uint64 update_chunk_size_threshold_bytes = 14 [(gogoproto.nullable) = false];

   // WriteAsOf is the time that the backfill entries should be written.
-  // Note: Older nodes may also use this as the read time instead of readAsOf.
   optional util.hlc.Timestamp writeAsOf = 7 [(gogoproto.nullable) = false];
   // The timestamp to perform index backfill historical scans at.
+  // This is only used by the column backfiller.
   optional util.hlc.Timestamp readAsOf = 9 [(gogoproto.nullable) = false];

   // IndexesToBackfill is the set of indexes to backfill. This is populated only

pkg/sql/index_backfiller.go

Lines changed: 11 additions & 25 deletions
@@ -23,7 +23,6 @@ import (
     "github.com/cockroachdb/cockroach/pkg/util/admission/admissionpb"
     "github.com/cockroachdb/cockroach/pkg/util/hlc"
     "github.com/cockroachdb/cockroach/pkg/util/syncutil"
-    "github.com/cockroachdb/errors"
 )

 // IndexBackfillPlanner holds dependencies for an index backfiller
@@ -44,23 +43,19 @@ func (ib *IndexBackfillPlanner) MaybePrepareDestIndexesForBackfill(
     if !current.MinimumWriteTimestamp.IsEmpty() {
         return current, nil
     }
-    // Pick an arbitrary read timestamp for the reads of the backfill.
-    // It's safe to use any timestamp to read even if we've partially backfilled
-    // at an earlier timestamp because other writing transactions have been
-    // writing at the appropriate timestamps in-between.
-    backfillReadTimestamp := ib.execCfg.Clock.Now()
+    minWriteTimestamp := ib.execCfg.Clock.Now()
     targetSpans := make([]roachpb.Span, len(current.DestIndexIDs))
     for i, idxID := range current.DestIndexIDs {
         targetSpans[i] = td.IndexSpan(ib.execCfg.Codec, idxID)
     }
     if err := scanTargetSpansToPushTimestampCache(
-        ctx, ib.execCfg.DB, backfillReadTimestamp, targetSpans,
+        ctx, ib.execCfg.DB, minWriteTimestamp, targetSpans,
     ); err != nil {
         return scexec.BackfillProgress{}, err
     }
     return scexec.BackfillProgress{
         Backfill:              current.Backfill,
-        MinimumWriteTimestamp: backfillReadTimestamp,
+        MinimumWriteTimestamp: minWriteTimestamp,
     }, nil
 }

@@ -72,19 +67,6 @@ func (ib *IndexBackfillPlanner) BackfillIndexes(
     job *jobs.Job,
     descriptor catalog.TableDescriptor,
 ) (retErr error) {
-    // Potentially install a protected timestamp before the GC interval is hit,
-    // which can help avoid transaction retry errors, with shorter GC intervals.
-    protectedTimestampCleaner := ib.execCfg.ProtectedTimestampManager.TryToProtectBeforeGC(ctx,
-        job,
-        descriptor,
-        progress.MinimumWriteTimestamp)
-    defer func() {
-        cleanupError := protectedTimestampCleaner(ctx)
-        if cleanupError != nil {
-            retErr = errors.CombineErrors(retErr, cleanupError)
-        }
-    }()
-
     var completed = struct {
         syncutil.Mutex
         g roachpb.SpanGroup
@@ -117,12 +99,17 @@ func (ib *IndexBackfillPlanner) BackfillIndexes(
             return nil
         }
         now := ib.execCfg.DB.Clock().Now()
+        // Pick now as the read timestamp for the backfill. It's safe to use this
+        // timestamp to read even if we've partially backfilled at an earlier
+        // timestamp because other writing transactions have been writing at the
+        // appropriate timestamps in-between.
+        readAsOf := now
         run, retErr := ib.plan(
             ctx,
             descriptor,
             now,
             progress.MinimumWriteTimestamp,
-            progress.MinimumWriteTimestamp,
+            readAsOf,
             spansToDo,
             progress.DestIndexIDs,
             progress.SourceIndexID,
@@ -197,9 +184,8 @@ func (ib *IndexBackfillPlanner) plan(
     chunkSize := indexBackfillBatchSize.Get(&ib.execCfg.Settings.SV)
     const writeAtRequestTimestamp = true
     spec, err := initIndexBackfillerSpec(
-        *td.TableDesc(), writeAsOf, readAsOf, writeAtRequestTimestamp, chunkSize,
-        indexesToBackfill,
-        sourceIndexID,
+        *td.TableDesc(), writeAsOf, writeAtRequestTimestamp, chunkSize,
+        indexesToBackfill, sourceIndexID,
     )
     if err != nil {
         return err