@@ -270,6 +270,7 @@ func runFailoverChaos(ctx context.Context, t test.Test, c cluster.Cluster, readO
270270 c .Start (ctx , t .L (), failoverStartOpts (), settings , c .CRDBNodes ())
271271
272272 conn := c .Conn (ctx , t .L (), 1 )
273+ setMaxLifetime (conn )
273274
274275 // Place 5 replicas of all ranges on n3-n9, keeping n1-n2 as SQL gateways.
275276 configureAllZones (t , ctx , conn , zoneConfig {replicas : 5 , onlyNodes : []int {3 , 4 , 5 , 6 , 7 , 8 , 9 }})
@@ -455,6 +456,7 @@ func runFailoverPartialLeaseGateway(ctx context.Context, t test.Test, c cluster.
455456 c .Start (ctx , t .L (), failoverStartOpts (), settings , c .CRDBNodes ())
456457
457458 conn := c .Conn (ctx , t .L (), 1 )
459+ setMaxLifetime (conn )
458460
459461 // Place all ranges on n1-n3 to start with.
460462 configureAllZones (t , ctx , conn , zoneConfig {replicas : 3 , onlyNodes : []int {1 , 2 , 3 }})
@@ -600,6 +602,7 @@ func runFailoverPartialLeaseLeader(ctx context.Context, t test.Test, c cluster.C
600602 c .Start (ctx , t .L (), failoverStartOpts (), settings , c .Range (1 , 3 ))
601603
602604 conn := c .Conn (ctx , t .L (), 1 )
605+ setMaxLifetime (conn )
603606
604607 // Place all ranges on n1-n3 to start with, and wait for upreplication.
605608 configureAllZones (t , ctx , conn , zoneConfig {replicas : 3 , onlyNodes : []int {1 , 2 , 3 }})
@@ -721,6 +724,7 @@ func runFailoverPartialLeaseLiveness(ctx context.Context, t test.Test, c cluster
721724 c .Start (ctx , t .L (), failoverStartOpts (), settings , c .CRDBNodes ())
722725
723726 conn := c .Conn (ctx , t .L (), 1 )
727+ setMaxLifetime (conn )
724728
725729 // Place all ranges on n1-n3, and an extra liveness leaseholder replica on n4.
726730 configureAllZones (t , ctx , conn , zoneConfig {replicas : 3 , onlyNodes : []int {1 , 2 , 3 }})
@@ -837,6 +841,7 @@ func runFailoverNonSystem(
837841 c .Start (ctx , t .L (), failoverStartOpts (), settings , c .CRDBNodes ())
838842
839843 conn := c .Conn (ctx , t .L (), 1 )
844+ setMaxLifetime (conn )
840845
841846 // Constrain all existing zone configs to n1-n3.
842847 configureAllZones (t , ctx , conn , zoneConfig {replicas : 3 , onlyNodes : []int {1 , 2 , 3 }})
@@ -944,6 +949,7 @@ func runFailoverLiveness(
944949 c .Start (ctx , t .L (), failoverStartOpts (), settings , c .CRDBNodes ())
945950
946951 conn := c .Conn (ctx , t .L (), 1 )
952+ setMaxLifetime (conn )
947953
948954 // Constrain all existing zone configs to n1-n3.
949955 configureAllZones (t , ctx , conn , zoneConfig {replicas : 3 , onlyNodes : []int {1 , 2 , 3 }})
@@ -1057,6 +1063,7 @@ func runFailoverSystemNonLiveness(
10571063 c .Start (ctx , t .L (), failoverStartOpts (), settings , c .CRDBNodes ())
10581064
10591065 conn := c .Conn (ctx , t .L (), 1 )
1066+ setMaxLifetime (conn )
10601067
10611068 // Constrain all existing zone configs to n4-n6, except liveness which is
10621069 // constrained to n1-n3.
@@ -1861,3 +1868,10 @@ func getKVLabels(concurrency int, insertCount int, readPercent int) map[string]s
18611868 "read_percent" : fmt .Sprintf ("%d" , readPercent ),
18621869 }
18631870}
1871+
// setMaxLifetime bounds how long pooled connections may be reused before
// being closed and replaced.
//
// See https://github.com/cockroachdb/cockroach/issues/143121#issuecomment-2739835367.
// This is out of an abundance of caution, since we are often introducing network
// issues in failover tests.
func setMaxLifetime(db *gosql.DB) {
	db.SetConnMaxLifetime(10 * time.Second)
}
0 commit comments