@@ -423,7 +423,15 @@ func (twb *txnWriteBuffer) validateRequests(ba *kvpb.BatchRequest) error {
 		if t.OriginTimestamp.IsSet() {
 			return unsupportedOptionError(t.Method(), "OriginTimestamp")
 		}
+		assertTrue(ba.MaxSpanRequestKeys == 0 && ba.TargetBytes == 0, "unexpectedly found CPut in a BatchRequest with a limit")
 	case *kvpb.PutRequest:
+		// TODO(yuzefovich): the DistSender allows Puts to be in batches
+		// with limits, which can happen when we're forced to flush the
+		// buffered Puts, and the batch we piggy-back on has a limit set.
+		// However, SQL never constructs such a batch on its own, so we're
+		// asserting the expectations from SQL. Figure out how to reconcile
+		// this with more permissive DistSender-level checks.
+		assertTrue(ba.MaxSpanRequestKeys == 0 && ba.TargetBytes == 0, "unexpectedly found Put in a BatchRequest with a limit")
 	case *kvpb.DeleteRequest:
 	case *kvpb.GetRequest:
 		// ReturnRawMVCCValues is unsupported because we don't know how to serve
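For intuition, here is a minimal, self-contained sketch of the invariant the two new assertions encode. `BatchLimits`, `assertTrue`, and `validatePutInBatch` below are simplified stand-ins for illustration, not the real kvpb types or the package's helpers.

```go
package main

import "fmt"

// BatchLimits mimics the paging knobs on kvpb.BatchRequest:
// MaxSpanRequestKeys caps how many keys the batch may return, and
// TargetBytes caps the total response size.
type BatchLimits struct {
	MaxSpanRequestKeys int64
	TargetBytes        int64
}

// assertTrue panics with msg when the invariant does not hold,
// mirroring the assertion helper used in the diff.
func assertTrue(cond bool, msg string) {
	if !cond {
		panic(msg)
	}
}

func validatePutInBatch(ba BatchLimits) {
	// SQL never sets paging limits on batches that contain (C)Puts, so
	// observing one here would indicate a bug upstream of this layer.
	assertTrue(ba.MaxSpanRequestKeys == 0 && ba.TargetBytes == 0,
		"unexpectedly found Put in a BatchRequest with a limit")
}

func main() {
	validatePutInBatch(BatchLimits{}) // no limits set: invariant holds
	fmt.Println("ok")
}
```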
@@ -1366,6 +1374,11 @@ func (rr requestRecord) toResp(
 			// We only use the response from KV if there wasn't already a
 			// buffered value for this key that our transaction wrote
 			// previously.
+			// TODO(yuzefovich): for completeness, we should check whether
+			// ResumeSpan is non-nil, in which case the response from KV is
+			// incomplete. This can happen when MaxSpanRequestKeys and/or
+			// TargetBytes limits are set on the batch, and SQL currently
+			// doesn't do that for batches with CPuts.
 			val = br.GetInner().(*kvpb.GetResponse).Value
 		}
 
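The TODO above concerns pagination: when a batch carries MaxSpanRequestKeys and/or TargetBytes, the server may stop early and set a ResumeSpan on requests it didn't evaluate. A hedged sketch of the missing check, using simplified stand-in types rather than the real kvpb messages:

```go
package kvsketch

// Span and GetResponse are simplified stand-ins for roachpb.Span and
// kvpb.GetResponse.
type Span struct{ Key, EndKey []byte }

type GetResponse struct {
	Value      []byte
	ResumeSpan *Span // set when a batch limit cut evaluation short
}

// valueFromKV returns the value only for a complete response. A non-nil
// ResumeSpan means the server stopped before evaluating this request,
// so the response carries no usable value and the request must be
// retried in a later page of the batch.
func valueFromKV(resp *GetResponse) (val []byte, ok bool) {
	if resp.ResumeSpan != nil {
		return nil, false
	}
	return resp.Value, true
}
```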
@@ -1397,6 +1410,11 @@ func (rr requestRecord) toResp(
 
 	case *kvpb.PutRequest:
 		var dla *bufferedDurableLockAcquisition
+		// TODO(yuzefovich): for completeness, we should check whether
+		// ResumeSpan is non-nil if we transformed the request, in which case
+		// the response from KV is incomplete. This can happen when
+		// MaxSpanRequestKeys and/or TargetBytes limits are set on the batch,
+		// and SQL currently doesn't do that for batches with Puts.
 		if rr.transformed && exclusionTimestampRequired {
 			dla = &bufferedDurableLockAcquisition{
 				str: lock.Exclusive,
@@ -1410,19 +1428,20 @@ func (rr requestRecord) toResp(
 	case *kvpb.DeleteRequest:
 		// To correctly populate FoundKey in the response, we must prefer any
 		// buffered values (if they exist).
-		var foundKey bool
+		var resp kvpb.DeleteResponse
 		val, _, served := twb.maybeServeRead(req.Key, req.Sequence)
 		if served {
 			log.VEventf(ctx, 2, "serving read portion of %s on key %s from the buffer", req.Method(), req.Key)
-			foundKey = val.IsPresent()
+			resp.FoundKey = val.IsPresent()
 		} else if rr.transformed {
 			// We sent a GetRequest to the KV layer to acquire an exclusive lock
 			// on the key, populate FoundKey using the response.
 			getResp := br.GetInner().(*kvpb.GetResponse)
 			if log.ExpensiveLogEnabled(ctx, 2) {
 				log.Eventf(ctx, "synthesizing DeleteResponse from GetResponse: %#v", getResp)
 			}
-			foundKey = getResp.Value.IsPresent()
+			resp.FoundKey = getResp.Value.IsPresent()
+			resp.ResumeSpan = getResp.ResumeSpan
 		} else {
 			// NB: If MustAcquireExclusiveLock wasn't set by the client then we
 			// eschew sending a Get request to the KV layer just to populate
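To make the Delete-to-Get transform concrete: a Delete that must acquire an exclusive lock is sent to KV as a locking Get, and the DeleteResponse is synthesized client-side. A rough sketch under simplified stand-in types (not the real kvpb messages):

```go
package kvsketch

// Simplified stand-ins for the kvpb response types.
type Span struct{ Key, EndKey []byte }

type GetResponse struct {
	Value      []byte
	ResumeSpan *Span
}

type DeleteResponse struct {
	FoundKey   bool
	ResumeSpan *Span
}

// synthesizeDeleteResponse mirrors the flow in the diff: FoundKey comes
// from whether the locking Get observed a value, and the ResumeSpan is
// propagated so the caller can tell that the Delete was not actually
// processed (and thus must not be buffered).
func synthesizeDeleteResponse(get *GetResponse) DeleteResponse {
	return DeleteResponse{
		FoundKey:   len(get.Value) > 0,
		ResumeSpan: get.ResumeSpan,
	}
}
```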
@@ -1434,7 +1453,14 @@ func (rr requestRecord) toResp(
 			// TODO(arul): improve the FoundKey semantics to have callers opt
 			// into whether they care about the key being found. Alternatively,
 			// clarify the behaviour on DeleteRequest.
-			foundKey = false
+			resp.FoundKey = false
+		}
+
+		ru.MustSetInner(&resp)
+		if resp.ResumeSpan != nil {
+			// When the Get was incomplete, we haven't actually processed this
+			// Del, so we cannot buffer the write.
+			break
 		}
 
 		var dla *bufferedDurableLockAcquisition
@@ -1446,14 +1472,16 @@ func (rr requestRecord) toResp(
 			}
 		}
 
-		ru.MustSetInner(&kvpb.DeleteResponse{
-			FoundKey: foundKey,
-		})
 		twb.addToBuffer(req.Key, roachpb.Value{}, req.Sequence, req.KVNemesisSeq, dla)
 
 	case *kvpb.GetRequest:
 		val, _, served := twb.maybeServeRead(req.Key, req.Sequence)
 		if served {
+			// TODO(yuzefovich): we're effectively ignoring the limits of
+			// BatchRequest when serving the Get from the buffer. We should
+			// consider setting the ResumeSpan if a limit has already been
+			// reached by this point. This will force us to set ResumeSpan on
+			// all remaining requests in the batch.
 			getResp := &kvpb.GetResponse{}
 			if val.IsPresent() {
 				getResp.Value = val
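As background for the TODO: reads served from the buffer never reach KV, so batch limits are not consulted along that path today. A hypothetical, much-simplified model of the buffered-read lookup (not the real txnWriteBuffer):

```go
package kvsketch

// bufferedWrite is one of the transaction's own writes held client-side.
type bufferedWrite struct {
	seq int32  // sequence number of the write
	val []byte // empty for a buffered tombstone (Delete)
}

// writeBuffer maps keys to this txn's writes, ascending by sequence.
type writeBuffer struct {
	writes map[string][]bufferedWrite
}

// maybeServeRead returns the latest buffered value visible at seq, if
// any. A read served here never touches KV, which is why the batch's
// MaxSpanRequestKeys/TargetBytes limits are not consulted on this path.
func (b *writeBuffer) maybeServeRead(key []byte, seq int32) ([]byte, bool) {
	ws := b.writes[string(key)]
	for i := len(ws) - 1; i >= 0; i-- {
		if ws[i].seq <= seq {
			return ws[i].val, true
		}
	}
	return nil, false // not buffered; the read must go to KV
}
```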
@@ -2403,8 +2431,6 @@ func (s *respIter) startKey() roachpb.Key {
 	// For ReverseScans, the EndKey of the ResumeSpan is updated to indicate the
 	// start key for the "next" page, which is exactly the last key that was
 	// reverse-scanned for the current response.
-	// TODO(yuzefovich): we should have some unit tests that exercise the
-	// ResumeSpan case.
 	if s.resumeSpan != nil {
 		return s.resumeSpan.EndKey
 	}
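The convention described in the comment above can be summarized in a few lines. This is an illustrative sketch with a stand-in Span type, not the respIter implementation:

```go
package kvsketch

type Span struct{ Key, EndKey []byte }

// nextPageStart returns where the next page of a paginated scan begins.
// Forward scans resume at ResumeSpan.Key; reverse scans walk backwards,
// so the next page begins at ResumeSpan.EndKey, which is exactly the
// last key that was reverse-scanned for the current response.
func nextPageStart(resume *Span, reverse bool) []byte {
	if resume == nil {
		return nil // the response was complete; there is no next page
	}
	if reverse {
		return resume.EndKey
	}
	return resume.Key
}
```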
@@ -2475,6 +2501,11 @@ func makeRespSizeHelper(it *respIter) respSizeHelper {
 }
 
 func (h *respSizeHelper) acceptBuffer(key roachpb.Key, value *roachpb.Value) {
+	// TODO(yuzefovich): we're effectively ignoring the limits of BatchRequest
+	// when serving the reads from the buffer. We should consider checking how
+	// many keys and bytes have already been included to see whether we've
+	// reached a limit, and set the ResumeSpan if so (which can result in some
+	// wasted work by the server).
 	h.numKeys++
 	lenKV, _ := encKVLength(key, value)
 	h.numBytes += int64(lenKV)
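For reference, the accounting performed by acceptBuffer amounts to the following sketch; the stand-in encKVLength only approximates the real helper, which also accounts for encoding overhead:

```go
package kvsketch

// encKVLength approximates the encoded size of a KV pair; the real
// helper also includes per-entry encoding overhead.
func encKVLength(key, value []byte) int {
	return len(key) + len(value)
}

// sizeHelper tallies what a response built from the buffer contributes
// toward the batch's MaxSpanRequestKeys/TargetBytes limits, which is
// where the TODO suggests a limit check could live.
type sizeHelper struct {
	numKeys, numBytes int64
}

func (h *sizeHelper) acceptBuffer(key, value []byte) {
	h.numKeys++
	h.numBytes += int64(encKVLength(key, value))
}
```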