@@ -433,7 +433,15 @@ func (twb *txnWriteBuffer) validateRequests(ba *kvpb.BatchRequest) error {
 		if t.OriginTimestamp.IsSet() {
 			return unsupportedOptionError(t.Method(), "OriginTimestamp")
 		}
+		assertTrue(ba.MaxSpanRequestKeys == 0 && ba.TargetBytes == 0, "unexpectedly found CPut in a BatchRequest with a limit")
 	case *kvpb.PutRequest:
+		// TODO(yuzefovich): the DistSender allows Puts to be in batches
+		// with limits, which can happen when we're forced to flush the
+		// buffered Puts, and the batch we piggy-back on has a limit set.
+		// However, SQL never constructs such a batch on its own, so we're
+		// asserting the expectations from SQL. Figure out how to reconcile
+		// this with more permissive DistSender-level checks.
+		assertTrue(ba.MaxSpanRequestKeys == 0 && ba.TargetBytes == 0, "unexpectedly found Put in a BatchRequest with a limit")
 	case *kvpb.DeleteRequest:
 	case *kvpb.GetRequest:
 		// ReturnRawMVCCValues is unsupported because we don't know how to serve
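The two assertions above lean on the fact that a batch with MaxSpanRequestKeys or TargetBytes set may be answered only partially, with a ResumeSpan pointing at the unserved remainder; the write buffer cannot yet synthesize correct CPut/Put responses from such partial results. A minimal sketch of the asserted invariant, using a stand-in type rather than the real kvpb.BatchRequest header:

```go
package main

import (
	"errors"
	"fmt"
)

// batchLimits is a stand-in for the limit fields of kvpb.BatchRequest's
// Header; the real header carries many more fields.
type batchLimits struct {
	MaxSpanRequestKeys int64 // max keys the batch may return (0 = unlimited)
	TargetBytes        int64 // response byte budget (0 = unlimited)
}

// assertNoLimits captures the invariant asserted above: buffered writes can't
// yet tolerate a partial (ResumeSpan-bearing) response, so any limit on the
// batch is a programming error at this layer.
func assertNoLimits(l batchLimits, method string) error {
	if l.MaxSpanRequestKeys != 0 || l.TargetBytes != 0 {
		return errors.New("unexpectedly found " + method + " in a BatchRequest with a limit")
	}
	return nil
}

func main() {
	fmt.Println(assertNoLimits(batchLimits{}, "CPut"))                      // <nil>
	fmt.Println(assertNoLimits(batchLimits{MaxSpanRequestKeys: 10}, "Put")) // error
}
```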
@@ -1379,6 +1387,11 @@ func (rr requestRecord) toResp(
 			// We only use the response from KV if there wasn't already a
 			// buffered value for this key that our transaction wrote
 			// previously.
+			// TODO(yuzefovich): for completeness, we should check whether
+			// ResumeSpan is non-nil, in which case the response from KV is
+			// incomplete. This can happen when MaxSpanRequestKeys and/or
+			// TargetBytes limits are set on the batch, and SQL currently
+			// doesn't do that for batches with CPuts.
 			val = br.GetInner().(*kvpb.GetResponse).Value
 		}
 
@@ -1410,6 +1423,11 @@ func (rr requestRecord) toResp(
 
 	case *kvpb.PutRequest:
 		var dla *bufferedDurableLockAcquisition
+		// TODO(yuzefovich): for completeness, we should check whether
+		// ResumeSpan is non-nil if we transformed the request, in which case
+		// the response from KV is incomplete. This can happen when
+		// MaxSpanRequestKeys and/or TargetBytes limits are set on the batch,
+		// and SQL currently doesn't do that for batches with Puts.
 		if rr.transformed && exclusionTimestampRequired {
 			dla = &bufferedDurableLockAcquisition{
 				str: lock.Exclusive,
@@ -1423,19 +1441,20 @@ func (rr requestRecord) toResp(
 	case *kvpb.DeleteRequest:
 		// To correctly populate FoundKey in the response, we must prefer any
 		// buffered values (if they exist).
-		var foundKey bool
+		var resp kvpb.DeleteResponse
 		val, _, served := twb.maybeServeRead(req.Key, req.Sequence)
 		if served {
 			log.VEventf(ctx, 2, "serving read portion of %s on key %s from the buffer", req.Method(), req.Key)
-			foundKey = val.IsPresent()
+			resp.FoundKey = val.IsPresent()
 		} else if rr.transformed {
 			// We sent a GetRequest to the KV layer to acquire an exclusive lock
 			// on the key, populate FoundKey using the response.
 			getResp := br.GetInner().(*kvpb.GetResponse)
 			if log.ExpensiveLogEnabled(ctx, 2) {
 				log.Eventf(ctx, "synthesizing DeleteResponse from GetResponse: %#v", getResp)
 			}
-			foundKey = getResp.Value.IsPresent()
+			resp.FoundKey = getResp.Value.IsPresent()
+			resp.ResumeSpan = getResp.ResumeSpan
 		} else {
 			// NB: If MustAcquireExclusiveLock wasn't set by the client then we
 			// eschew sending a Get request to the KV layer just to populate
@@ -1447,7 +1466,14 @@ func (rr requestRecord) toResp(
 			// TODO(arul): improve the FoundKey semantics to have callers opt
 			// into whether they care about the key being found. Alternatively,
 			// clarify the behaviour on DeleteRequest.
-			foundKey = false
+			resp.FoundKey = false
+		}
+
+		ru.MustSetInner(&resp)
+		if resp.ResumeSpan != nil {
+			// When the Get was incomplete, we haven't actually processed this
+			// Del, so we cannot buffer the write.
+			break
 		}
 
 		var dla *bufferedDurableLockAcquisition
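Taken together, these hunks replace the local foundKey flag with a full DeleteResponse so that the locking Get's ResumeSpan can be carried over, and they skip buffering when that Get was cut short. A self-contained sketch of the flow, with stand-in types in place of kvpb.GetResponse and kvpb.DeleteResponse:

```go
package main

import "fmt"

// Stand-ins for the slivers of kvpb/roachpb state used here; the real types
// carry much more.
type span struct{ Key, EndKey string }

type getResponse struct {
	ValuePresent bool  // whether the key currently has a value
	ResumeSpan   *span // non-nil: the server stopped before serving this Get
}

type deleteResponse struct {
	FoundKey   bool
	ResumeSpan *span
}

// synthesizeDeleteResponse mirrors the flow in the hunks above: prefer the
// buffered value, otherwise fall back to the locking Get's response and
// propagate its ResumeSpan. The bool result says whether the Del may be
// buffered; an incomplete Get means the Del wasn't actually processed.
func synthesizeDeleteResponse(
	servedFromBuffer, bufferedPresent bool, get *getResponse,
) (deleteResponse, bool) {
	var resp deleteResponse
	switch {
	case servedFromBuffer:
		resp.FoundKey = bufferedPresent
	case get != nil:
		resp.FoundKey = get.ValuePresent
		resp.ResumeSpan = get.ResumeSpan
	}
	return resp, resp.ResumeSpan == nil
}

func main() {
	// The Get came back with a ResumeSpan: don't buffer the write.
	resp, bufferWrite := synthesizeDeleteResponse(false, false, &getResponse{
		ResumeSpan: &span{Key: "b", EndKey: "z"},
	})
	fmt.Printf("foundKey=%t bufferWrite=%t\n", resp.FoundKey, bufferWrite)
}
```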
@@ -1459,14 +1485,16 @@ func (rr requestRecord) toResp(
 			}
 		}
 
-		ru.MustSetInner(&kvpb.DeleteResponse{
-			FoundKey: foundKey,
-		})
 		twb.addToBuffer(req.Key, roachpb.Value{}, req.Sequence, req.KVNemesisSeq, dla)
 
 	case *kvpb.GetRequest:
 		val, _, served := twb.maybeServeRead(req.Key, req.Sequence)
 		if served {
+			// TODO(yuzefovich): we're effectively ignoring the limits of
+			// BatchRequest when serving the Get from the buffer. We should
+			// consider setting the ResumeSpan if a limit has already been
+			// reached by this point. This will force us to set ResumeSpan on
+			// all remaining requests in the batch.
 			getResp := &kvpb.GetResponse{}
 			if val.IsPresent() {
 				getResp.Value = val
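The TODO above points at a gap rather than at code in this patch: buffer-served Gets bypass the batch's key/byte budget entirely. A hypothetical budget tracker (nothing like it exists in the patch) illustrating what "set the ResumeSpan once the limit is reached" could look like:

```go
package main

import "fmt"

// limitBudget is a hypothetical tracker for how much of a batch's
// MaxSpanRequestKeys / TargetBytes budget remains.
type limitBudget struct {
	keysLeft  int64 // 0 means exhausted; negative means unlimited
	bytesLeft int64 // same convention
}

// serveFromBuffer decides whether a buffered Get may still be served: once
// the budget is exhausted, the Get must receive a ResumeSpan instead of a
// value, and every later request in the batch must be resumed as well.
func (b *limitBudget) serveFromBuffer(valueSize int64) (served, resume bool) {
	if b.keysLeft == 0 || b.bytesLeft == 0 {
		return false, true
	}
	if b.keysLeft > 0 {
		b.keysLeft--
	}
	if b.bytesLeft > 0 {
		if b.bytesLeft -= valueSize; b.bytesLeft < 0 {
			b.bytesLeft = 0
		}
	}
	return true, false
}

func main() {
	b := limitBudget{keysLeft: 1, bytesLeft: -1}
	fmt.Println(b.serveFromBuffer(100)) // true false: first Get fits
	fmt.Println(b.serveFromBuffer(100)) // false true: budget gone, resume
}
```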
@@ -2447,8 +2475,6 @@ func (s *respIter) startKey() roachpb.Key {
 	// For ReverseScans, the EndKey of the ResumeSpan is updated to indicate the
 	// start key for the "next" page, which is exactly the last key that was
 	// reverse-scanned for the current response.
-	// TODO(yuzefovich): we should have some unit tests that exercise the
-	// ResumeSpan case.
 	if s.resumeSpan != nil {
 		return s.resumeSpan.EndKey
 	}
@@ -2519,6 +2545,11 @@ func makeRespSizeHelper(it *respIter) respSizeHelper {
 }
 
 func (h *respSizeHelper) acceptBuffer(key roachpb.Key, value *roachpb.Value) {
+	// TODO(yuzefovich): we're effectively ignoring the limits of BatchRequest
+	// when serving the reads from the buffer. We should consider checking how
+	// many keys and bytes have already been included to see whether we've
+	// reached a limit, and set the ResumeSpan if so (which can result in some
+	// wasted work by the server).
 	h.numKeys++
 	lenKV, _ := encKVLength(key, value)
 	h.numBytes += int64(lenKV)
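acceptBuffer only tallies keys and encoded bytes today; the TODO suggests comparing those tallies against the batch limits. A runnable sketch of the accounting, with a simplified encKVLength that counts raw key/value bytes instead of the real encoded length:

```go
package main

import "fmt"

// encKVLength is a stand-in for the helper used above; here we simply count
// the raw key and value bytes, while the real function accounts for the
// encoded KV representation.
func encKVLength(key, value []byte) int {
	return len(key) + len(value)
}

// respSizeHelper mirrors the accumulator above: it tallies how many keys and
// bytes a response would carry, which is exactly the bookkeeping the TODO
// suggests comparing against MaxSpanRequestKeys/TargetBytes.
type respSizeHelper struct {
	numKeys  int64
	numBytes int64
}

func (h *respSizeHelper) acceptBuffer(key, value []byte) {
	h.numKeys++
	h.numBytes += int64(encKVLength(key, value))
}

func main() {
	var h respSizeHelper
	h.acceptBuffer([]byte("a"), []byte("apple"))
	h.acceptBuffer([]byte("b"), []byte("banana"))
	fmt.Printf("keys=%d bytes=%d\n", h.numKeys, h.numBytes) // keys=2 bytes=13
}
```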