@@ -566,67 +566,76 @@ func (h *txnHeartbeater) abortTxnAsyncLocked(ctx context.Context) {
 
 	const taskName = "txnHeartbeater: aborting txn"
 	log.VEventf(ctx, 2, "async abort for txn: %s", txn)
-	if err := h.stopper.RunAsyncTask(h.AnnotateCtx(context.Background()), taskName,
-		func(ctx context.Context) {
-			if err := timeutil.RunWithTimeout(ctx, taskName, abortTxnAsyncTimeout,
-				func(ctx context.Context) error {
-					h.mu.Lock()
-					defer h.mu.Unlock()
-
-					// If we find an abortTxnAsyncResultC, that means an async
-					// rollback request is already in flight, so there's no
-					// point in us running another. This can happen because the
-					// TxnCoordSender also calls abortTxnAsyncLocked()
-					// independently of the heartbeat loop.
-					if h.mu.abortTxnAsyncResultC != nil {
-						log.VEventf(ctx, 2,
-							"skipping async abort due to concurrent async abort for %s", txn)
-						return nil
-					}
-
-					// TxnCoordSender allows EndTxn(commit=false) through even
-					// after we set finalObservedStatus, and that request can
-					// race with us for the mutex. Thus, if we find an in-flight
-					// request here, after checking ifReqs=0 before being spawned,
-					// we deduce that it must have been a rollback and there's no
-					// point in sending another rollback.
-					if h.mu.ifReqs > 0 {
-						log.VEventf(ctx, 2,
-							"skipping async abort due to client rollback for %s", txn)
-						return nil
-					}
-
-					// Set up a result channel to signal to an incoming client
-					// rollback that an async rollback is already in progress,
-					// and pass it the result. The buffer allows storing the
-					// result even when no client rollback arrives. Recall that
-					// the SendLocked() call below releases the mutex while
-					// running, allowing concurrent incoming requests.
-					h.mu.abortTxnAsyncResultC = make(chan abortTxnAsyncResult, 1)
-
-					// Send the abort request through the interceptor stack. This is
-					// important because we need the txnPipeliner to append lock spans
-					// to the EndTxn request.
-					br, pErr := h.wrapped.SendLocked(ctx, ba)
-					if pErr != nil {
-						log.VErrEventf(ctx, 1, "async abort failed for %s: %s ", txn, pErr)
-						h.metrics.AsyncRollbacksFailed.Inc(1)
-					}
-
-					// Pass the result to a waiting client rollback, if any, and
-					// remove the channel since we're no longer in flight.
-					h.mu.abortTxnAsyncResultC <- abortTxnAsyncResult{br: br, pErr: pErr}
-					h.mu.abortTxnAsyncResultC = nil
+
+	work := func(ctx context.Context) {
+		if err := timeutil.RunWithTimeout(ctx, taskName, abortTxnAsyncTimeout,
+			func(ctx context.Context) error {
+				h.mu.Lock()
+				defer h.mu.Unlock()
+
+				// If we find an abortTxnAsyncResultC, that means an async
+				// rollback request is already in flight, so there's no
+				// point in us running another. This can happen because the
+				// TxnCoordSender also calls abortTxnAsyncLocked()
+				// independently of the heartbeat loop.
+				if h.mu.abortTxnAsyncResultC != nil {
+					log.VEventf(ctx, 2,
+						"skipping async abort due to concurrent async abort for %s", txn)
 					return nil
-				},
-			); err != nil {
-				log.VEventf(ctx, 1, "async abort failed for %s: %s", txn, err)
-			}
-		},
-	); err != nil {
+				}
+
+				// TxnCoordSender allows EndTxn(commit=false) through even
+				// after we set finalObservedStatus, and that request can
+				// race with us for the mutex. Thus, if we find an in-flight
+				// request here, after checking ifReqs=0 before being spawned,
+				// we deduce that it must have been a rollback and there's no
+				// point in sending another rollback.
+				if h.mu.ifReqs > 0 {
+					log.VEventf(ctx, 2,
+						"skipping async abort due to client rollback for %s", txn)
+					return nil
+				}
+
+				// Set up a result channel to signal to an incoming client
+				// rollback that an async rollback is already in progress,
+				// and pass it the result. The buffer allows storing the
+				// result even when no client rollback arrives. Recall that
+				// the SendLocked() call below releases the mutex while
+				// running, allowing concurrent incoming requests.
+				h.mu.abortTxnAsyncResultC = make(chan abortTxnAsyncResult, 1)
+
+				// Send the abort request through the interceptor stack. This is
+				// important because we need the txnPipeliner to append lock spans
+				// to the EndTxn request.
+				br, pErr := h.wrapped.SendLocked(ctx, ba)
+				if pErr != nil {
+					log.VErrEventf(ctx, 1, "async abort failed for %s: %s ", txn, pErr)
+					h.metrics.AsyncRollbacksFailed.Inc(1)
+				}
+
+				// Pass the result to a waiting client rollback, if any, and
+				// remove the channel since we're no longer in flight.
+				h.mu.abortTxnAsyncResultC <- abortTxnAsyncResult{br: br, pErr: pErr}
+				h.mu.abortTxnAsyncResultC = nil
+				return nil
+			},
+		); err != nil {
+			log.VEventf(ctx, 1, "async abort failed for %s: %s", txn, err)
+		}
+	}
+
+	asyncCtx, hdl, err := h.stopper.GetHandle(h.AnnotateCtx(context.Background()), stop.TaskOpts{
+		TaskName: taskName,
+	})
+	if err != nil {
 		log.Warningf(ctx, "%v", err)
 		h.metrics.AsyncRollbacksFailed.Inc(1)
+		return
 	}
+	go func(ctx context.Context) {
+		defer hdl.Activate(ctx).Release(ctx)
+		work(ctx)
+	}(asyncCtx)
 }
 
 // randLockingIndex returns the index of the first request that acquires locks
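The refactor above swaps stopper.RunAsyncTask for the handle-based API: the caller acquires a handle synchronously, then spawns the goroutine itself and brackets its lifetime with Activate/Release. Below is a minimal sketch of that pattern in isolation, using only the stop-package calls that appear in the diff (GetHandle, stop.TaskOpts, Activate, Release); the import path, function name, task name, and work body are assumptions for illustration, not part of the change.

// Hypothetical sketch of the handle-based stopper pattern used in the diff.
// Only the stop-package calls are taken from the change; everything else
// (package, function, task name) is a placeholder.
package example

import (
	"context"

	"github.com/cockroachdb/cockroach/pkg/util/stop" // assumed import path
)

func runInBackground(stopper *stop.Stopper, work func(ctx context.Context)) {
	// Acquire a handle synchronously. If the stopper is already draining,
	// GetHandle returns an error and the task must not be started.
	asyncCtx, hdl, err := stopper.GetHandle(context.Background(), stop.TaskOpts{
		TaskName: "example: background work", // placeholder task name
	})
	if err != nil {
		return // stopper is quiescing; skip the task
	}
	// Spawn the goroutine ourselves; Activate marks the task as running and
	// the deferred Release lets the stopper finish draining once it ends.
	go func(ctx context.Context) {
		defer hdl.Activate(ctx).Release(ctx)
		work(ctx)
	}(asyncCtx)
}

Compared with RunAsyncTask, this split lets the caller do work between acquiring the handle and starting the goroutine, and makes the failure path (stopper already stopping) explicit at the call site, which is why the new code can return early and bump AsyncRollbacksFailed before anything is spawned.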
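One idiom in the work closure is also worth calling out: abortTxnAsyncResultC is created with capacity 1 so the async rollback can always deposit its result, whether or not a client rollback ever arrives to consume it. Here is a generic sketch of that one-slot handoff; every name in it is hypothetical and it does not use any CockroachDB types.

// Hypothetical sketch of the one-slot result handoff: the sender never
// blocks, and a receiver that shows up later still gets the value.
package main

import (
	"fmt"
	"time"
)

// result plays the role of abortTxnAsyncResult: whatever the async worker
// produced, success or error.
type result struct {
	value string
	err   error
}

func main() {
	// Capacity 1 means the worker's send always succeeds, even if nobody is
	// waiting; the value simply sits in the buffer.
	resultC := make(chan result, 1)

	go func() {
		time.Sleep(10 * time.Millisecond) // stand-in for the real work
		resultC <- result{value: "rolled back"}
	}()

	// A consumer arriving later (or not at all) can still collect the result.
	res := <-resultC
	fmt.Println(res.value, res.err)
}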