@@ -440,7 +440,15 @@ func (twb *txnWriteBuffer) validateRequests(ba *kvpb.BatchRequest) error {
 			if t.OriginTimestamp.IsSet() {
 				return unsupportedOptionError(t.Method(), "OriginTimestamp")
 			}
+			assertTrue(ba.MaxSpanRequestKeys == 0 && ba.TargetBytes == 0, "unexpectedly found CPut in a BatchRequest with a limit")
 		case *kvpb.PutRequest:
+			// TODO(yuzefovich): the DistSender allows Puts to be in batches
+			// with limits, which can happen when we're forced to flush the
+			// buffered Puts, and the batch we piggy-back on has a limit set.
+			// However, SQL never constructs such a batch on its own, so we're
+			// asserting the expectations from SQL. Figure out how to reconcile
+			// this with more permissive DistSender-level checks.
+			assertTrue(ba.MaxSpanRequestKeys == 0 && ba.TargetBytes == 0, "unexpectedly found Put in a BatchRequest with a limit")
 		case *kvpb.DeleteRequest:
 		case *kvpb.GetRequest:
 			// ReturnRawMVCCValues is unsupported because we don't know how to serve
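
The invariant these assertions encode can be illustrated with a small self-contained sketch. Everything below (batchRequest, putRequest, getRequest, assertNoLimits) is a simplified stand-in for illustration only, not the real kvpb types:

package main

import "fmt"

// Simplified stand-ins for the kvpb types; the real BatchRequest carries
// many more fields.
type batchRequest struct {
	MaxSpanRequestKeys int64
	TargetBytes        int64
	Requests           []request
}

type request interface{ method() string }

type putRequest struct{}

func (putRequest) method() string { return "Put" }

type getRequest struct{}

func (getRequest) method() string { return "Get" }

// assertNoLimits mirrors the assertTrue calls above: SQL is expected never
// to set key/byte limits on a batch that contains buffered writes.
func assertNoLimits(ba *batchRequest) {
	if ba.MaxSpanRequestKeys == 0 && ba.TargetBytes == 0 {
		return
	}
	for _, r := range ba.Requests {
		if _, ok := r.(putRequest); ok {
			panic(fmt.Sprintf("unexpectedly found %s in a BatchRequest with a limit", r.method()))
		}
	}
}

func main() {
	// Gets may carry limits; buffered writes like Puts must not.
	assertNoLimits(&batchRequest{MaxSpanRequestKeys: 10, Requests: []request{getRequest{}}})
	fmt.Println("ok")
}
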
@@ -1383,6 +1391,11 @@ func (rr requestRecord) toResp(
 			// We only use the response from KV if there wasn't already a
 			// buffered value for this key that our transaction wrote
 			// previously.
+			// TODO(yuzefovich): for completeness, we should check whether
+			// ResumeSpan is non-nil, in which case the response from KV is
+			// incomplete. This can happen when MaxSpanRequestKeys and/or
+			// TargetBytes limits are set on the batch, and SQL currently
+			// doesn't do that for batches with CPuts.
 			val = br.GetInner().(*kvpb.GetResponse).Value
 		}

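
The precedence rule in this hunk (the transaction's own buffered write wins over the KV response) can be sketched as follows; value and serveRead are hypothetical stand-ins, and the real maybeServeRead also takes sequence numbers into account:

package main

import "fmt"

// value stands in for roachpb.Value; a nil pointer means no value.
type value struct{ raw string }

// serveRead sketches the precedence rule from the hunk above: a value the
// transaction itself buffered wins over whatever the KV layer returned.
// Per the TODO, a complete treatment would also inspect the KV response's
// ResumeSpan before trusting it.
func serveRead(buffered, fromKV *value) *value {
	if buffered != nil {
		return buffered
	}
	return fromKV
}

func main() {
	buffered := &value{raw: "written by this txn"}
	fromKV := &value{raw: "committed value from KV"}
	fmt.Println(serveRead(buffered, fromKV).raw) // written by this txn
	fmt.Println(serveRead(nil, fromKV).raw)      // committed value from KV
}
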
@@ -1414,6 +1427,11 @@ func (rr requestRecord) toResp(

 	case *kvpb.PutRequest:
 		var dla *bufferedDurableLockAcquisition
+		// TODO(yuzefovich): for completeness, we should check whether
+		// ResumeSpan is non-nil if we transformed the request, in which case
+		// the response from KV is incomplete. This can happen when
+		// MaxSpanRequestKeys and/or TargetBytes limits are set on the batch,
+		// and SQL currently doesn't do that for batches with Puts.
 		if rr.transformed && exclusionTimestampRequired {
 			dla = &bufferedDurableLockAcquisition{
 				str: lock.Exclusive,
@@ -1427,19 +1445,20 @@ func (rr requestRecord) toResp(
 	case *kvpb.DeleteRequest:
 		// To correctly populate FoundKey in the response, we must prefer any
 		// buffered values (if they exist).
-		var foundKey bool
+		var resp kvpb.DeleteResponse
 		val, _, served := twb.maybeServeRead(req.Key, req.Sequence)
 		if served {
 			log.VEventf(ctx, 2, "serving read portion of %s on key %s from the buffer", req.Method(), req.Key)
-			foundKey = val.IsPresent()
+			resp.FoundKey = val.IsPresent()
 		} else if rr.transformed {
 			// We sent a GetRequest to the KV layer to acquire an exclusive lock
 			// on the key, populate FoundKey using the response.
 			getResp := br.GetInner().(*kvpb.GetResponse)
 			if log.ExpensiveLogEnabled(ctx, 2) {
 				log.Eventf(ctx, "synthesizing DeleteResponse from GetResponse: %#v", getResp)
 			}
-			foundKey = getResp.Value.IsPresent()
+			resp.FoundKey = getResp.Value.IsPresent()
+			resp.ResumeSpan = getResp.ResumeSpan
 		} else {
 			// NB: If MustAcquireExclusiveLock wasn't set by the client then we
 			// eschew sending a Get request to the KV layer just to populate
@@ -1451,7 +1470,14 @@ func (rr requestRecord) toResp(
 			// TODO(arul): improve the FoundKey semantics to have callers opt
 			// into whether they care about the key being found. Alternatively,
 			// clarify the behaviour on DeleteRequest.
-			foundKey = false
+			resp.FoundKey = false
+		}
+
+		ru.MustSetInner(&resp)
+		if resp.ResumeSpan != nil {
+			// When the Get was incomplete, we haven't actually processed this
+			// Del, so we cannot buffer the write.
+			break
 		}

 		var dla *bufferedDurableLockAcquisition
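
This is the heart of the change for Deletes: the locking Get's response both populates FoundKey and, via ResumeSpan, tells us whether the Del was actually evaluated. A minimal sketch of that synthesis, assuming simplified stand-in types (span, getResponse, deleteResponse) rather than the real kvpb ones:

package main

import "fmt"

// Simplified stand-ins for roachpb.Span and the kvpb response types.
type span struct{ Key, EndKey string }

type getResponse struct {
	Value      *string // nil means the key was not found
	ResumeSpan *span   // non-nil means the Get was not evaluated
}

type deleteResponse struct {
	FoundKey   bool
	ResumeSpan *span
}

// synthesizeDeleteResponse mirrors the transformed-Delete branch above: the
// locking Get's result populates FoundKey, and its ResumeSpan is carried
// over so the client can tell the Del wasn't processed. The second return
// value says whether the caller may buffer the tombstone.
func synthesizeDeleteResponse(getResp *getResponse) (resp deleteResponse, buffer bool) {
	resp.FoundKey = getResp.Value != nil
	resp.ResumeSpan = getResp.ResumeSpan
	// When the Get was incomplete, the Delete hasn't logically happened, so
	// the write must not be added to the buffer.
	return resp, resp.ResumeSpan == nil
}

func main() {
	v := "old"
	resp, buffer := synthesizeDeleteResponse(&getResponse{Value: &v})
	fmt.Printf("FoundKey=%t buffer=%t\n", resp.FoundKey, buffer) // FoundKey=true buffer=true

	resp, buffer = synthesizeDeleteResponse(&getResponse{ResumeSpan: &span{Key: "a", EndKey: "b"}})
	fmt.Printf("incomplete=%t buffer=%t\n", resp.ResumeSpan != nil, buffer) // incomplete=true buffer=false
}
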
@@ -1463,14 +1489,16 @@ func (rr requestRecord) toResp(
 			}
 		}

-		ru.MustSetInner(&kvpb.DeleteResponse{
-			FoundKey: foundKey,
-		})
 		twb.addToBuffer(req.Key, roachpb.Value{}, req.Sequence, req.KVNemesisSeq, dla)

 	case *kvpb.GetRequest:
 		val, _, served := twb.maybeServeRead(req.Key, req.Sequence)
 		if served {
+			// TODO(yuzefovich): we're effectively ignoring the limits of
+			// BatchRequest when serving the Get from the buffer. We should
+			// consider setting the ResumeSpan if a limit has already been
+			// reached by this point. This will force us to set ResumeSpan on
+			// all remaining requests in the batch.
 			getResp := &kvpb.GetResponse{}
 			if val.IsPresent() {
 				getResp.Value = val
@@ -2420,8 +2448,6 @@ func (s *respIter) startKey() roachpb.Key {
 	// For ReverseScans, the EndKey of the ResumeSpan is updated to indicate the
 	// start key for the "next" page, which is exactly the last key that was
 	// reverse-scanned for the current response.
-	// TODO(yuzefovich): we should have some unit tests that exercise the
-	// ResumeSpan case.
 	if s.resumeSpan != nil {
 		return s.resumeSpan.EndKey
 	}
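
The paging contract this comment describes can be demonstrated with a toy reverse scan over a sorted in-memory slice; reverseScanPage below is purely illustrative and not the KV API:

package main

import "fmt"

// reverseScanPage returns at most maxKeys keys from [start, end), largest
// first, plus a resume key. keys must be sorted ascending. This only
// illustrates the paging contract described above.
func reverseScanPage(keys []string, start, end string, maxKeys int) (page []string, resumeEndKey string) {
	for i := len(keys) - 1; i >= 0; i-- {
		k := keys[i]
		if k >= end || k < start {
			continue
		}
		if len(page) == maxKeys {
			// More keys remain: resume below the last key we returned. Like
			// the ResumeSpan's EndKey above, this is exactly the last key
			// that was reverse-scanned for the current page.
			return page, page[len(page)-1]
		}
		page = append(page, k)
	}
	return page, "" // done: no resume span
}

func main() {
	keys := []string{"a", "b", "c", "d", "e"}
	start, end := "a", "z"
	for {
		page, resume := reverseScanPage(keys, start, end, 2)
		fmt.Println(page) // [e d], then [c b], then [a]
		if resume == "" {
			break
		}
		// The resume key becomes the exclusive upper bound, i.e. the start
		// key, of the next page.
		end = resume
	}
}
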
@@ -2492,6 +2518,11 @@ func makeRespSizeHelper(it *respIter) respSizeHelper {
 }

 func (h *respSizeHelper) acceptBuffer(key roachpb.Key, value *roachpb.Value) {
+	// TODO(yuzefovich): we're effectively ignoring the limits of BatchRequest
+	// when serving the reads from the buffer. We should consider checking how
+	// many keys and bytes have already been included to see whether we've
+	// reached a limit, and set the ResumeSpan if so (which can result in some
+	// wasted work by the server).
 	h.numKeys++
 	lenKV, _ := encKVLength(key, value)
 	h.numBytes += int64(lenKV)
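
A hedged sketch of the limit check this TODO contemplates, with sizeHelper and accept as hypothetical stand-ins for respSizeHelper/acceptBuffer and a trivial encKVLength (the real one computes the encoded KV size):

package main

import "fmt"

// sizeHelper tracks how much of the batch's key/byte budget has been used.
type sizeHelper struct {
	numKeys, numBytes    int64
	maxKeys, targetBytes int64 // 0 means no limit
}

func encKVLength(key string, value []byte) int { return len(key) + len(value) }

// accept records one buffered KV pair; it returns false once a limit is
// reached, at which point the caller would set a ResumeSpan instead of
// including further keys.
func (h *sizeHelper) accept(key string, value []byte) bool {
	if (h.maxKeys > 0 && h.numKeys == h.maxKeys) ||
		(h.targetBytes > 0 && h.numBytes >= h.targetBytes) {
		return false
	}
	h.numKeys++
	h.numBytes += int64(encKVLength(key, value))
	return true
}

func main() {
	h := sizeHelper{maxKeys: 2}
	fmt.Println(h.accept("a", []byte("1"))) // true
	fmt.Println(h.accept("b", []byte("2"))) // true
	fmt.Println(h.accept("c", []byte("3"))) // false: key limit reached
}
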