@@ -421,10 +421,11 @@ func TestConcurrentZip(t *testing.T) {
421421 defer func (prevStderr * os.File ) { stderr = prevStderr }(stderr )
422422 stderr = os .Stdout
423423
424- out , err := c .RunWithCapture ("debug zip --timeout=30s --cpu-profile-duration=0s --validate-zip-file=false " + os .DevNull )
425- if err != nil {
426- t .Fatal (err )
427- }
424+ out , err := c .RunWithCapture (fmt .Sprintf (
425+ "debug zip --timeout=30s --cpu-profile-duration=0s --validate-zip-file=false --cluster-name=%s %s" ,
426+ tc .ClusterName (), os .DevNull ,
427+ ))
428+ require .NoError (t , err )
428429
429430 // Strip any non-deterministic messages.
430431 out = eraseNonDeterministicZipOutput (out )
@@ -437,6 +438,8 @@ func TestConcurrentZip(t *testing.T) {
437438 // which the original messages interleave with other messages mean the number
438439 // of them after each series is collapsed is also non-deterministic.
439440 out = regexp .MustCompile (`<dumping SQL tables>\n` ).ReplaceAllString (out , "" )
441+ // Replace the non-deterministic cluster name with a placeholder.
442+ out = eraseClusterName (out , tc .ClusterName ())
440443
441444 // We use datadriven simply to read the golden output file; we don't actually
442445 // run any commands. Using datadriven allows TESTFLAGS=-rewrite.
@@ -541,9 +544,8 @@ func TestUnavailableZip(t *testing.T) {
541544 tc := testcluster .StartTestCluster (t , 3 ,
542545 base.TestClusterArgs {ServerArgs : base.TestServerArgs {
543546 DefaultTestTenant : base .TestIsSpecificToStorageLayerAndNeedsASystemTenant ,
544-
545- Insecure : true ,
546- Knobs : base.TestingKnobs {Store : knobs },
547+ Insecure : true ,
548+ Knobs : base.TestingKnobs {Store : knobs },
547549 }})
548550 defer tc .Stopper ().Stop (context .Background ())
549551
@@ -559,9 +561,10 @@ func TestUnavailableZip(t *testing.T) {
559561 defer close (ch )
560562
561563 // Run debug zip against node 1.
562- debugZipCommand :=
563- "debug zip --concurrency=1 --cpu-profile-duration=0 " + os .
564- DevNull + " --timeout=.5s"
564+ debugZipCommand := fmt .Sprintf (
565+ "debug zip --concurrency=1 --cpu-profile-duration=0 --timeout=.5s --cluster-name=%s %s" ,
566+ tc .ClusterName (), os .DevNull ,
567+ )
565568
566569 t .Run ("server 1" , func (t * testing.T ) {
567570 c := TestCLI {
@@ -651,6 +654,10 @@ func baseZipOutput(nodeId int) []string {
651654 return output
652655}
653656
// eraseClusterName scrubs the non-deterministic cluster name from command
// output by substituting the fixed placeholder "<cluster-name>", so the
// output can be compared against deterministic golden files.
func eraseClusterName(str, name string) string {
	const placeholder = "<cluster-name>"
	// strings.Replace with n = -1 substitutes every occurrence,
	// identical in behavior to strings.ReplaceAll.
	return strings.Replace(str, name, placeholder, -1)
}
660+
654661func eraseNonDeterministicZipOutput (out string ) string {
655662 re := regexp .MustCompile (`(?m)postgresql://.*$` )
656663 out = re .ReplaceAllString (out , `postgresql://...` )
@@ -736,13 +743,15 @@ func TestPartialZip(t *testing.T) {
736743 defer func (prevStderr * os.File ) { stderr = prevStderr }(stderr )
737744 stderr = os .Stdout
738745
739- out , err := c .RunWithCapture ("debug zip --concurrency=1 --cpu-profile-duration=0s --validate-zip-file=false " + os .DevNull )
740- if err != nil {
741- t .Fatal (err )
742- }
746+ out , err := c .RunWithCapture (fmt .Sprintf (
747+ "debug zip --concurrency=1 --cpu-profile-duration=0s --validate-zip-file=false --cluster-name=%s %s" ,
748+ tc .ClusterName (), os .DevNull ,
749+ ))
750+ require .NoError (t , err )
743751
744752 // Strip any non-deterministic messages.
745753 t .Log (out )
754+ out = eraseClusterName (out , tc .ClusterName ())
746755 out = eraseNonDeterministicZipOutput (out )
747756
748757 datadriven .RunTest (t , datapathutils .TestDataPath (t , "zip" , "partial1" ),
@@ -751,12 +760,13 @@ func TestPartialZip(t *testing.T) {
751760 })
752761
753762 // Now do it again and exclude the down node explicitly.
754- out , err = c .RunWithCapture ("debug zip " + os . DevNull + " --concurrency=1 --exclude-nodes=2 --cpu-profile-duration=0" +
755- " --validate-zip-file=false" )
756- if err != nil {
757- t . Fatal ( err )
758- }
763+ out , err = c .RunWithCapture (fmt .Sprintf (
764+ "debug zip --concurrency=1 --exclude-nodes=2 --cpu-profile-duration=0 --validate-zip-file=false --cluster-name=%s %s" ,
765+ tc .ClusterName (), os .DevNull ,
766+ ))
767+ require .NoError (t , err )
759768
769+ out = eraseClusterName (out , tc .ClusterName ())
760770 out = eraseNonDeterministicZipOutput (out )
761771 datadriven .RunTest (t , datapathutils .TestDataPath (t , "zip" , "partial1_excluded" ),
762772 func (t * testing.T , td * datadriven.TestData ) string {
@@ -767,12 +777,11 @@ func TestPartialZip(t *testing.T) {
767777 // skips over it automatically. We specifically use --wait=none because
768778 // we're decommissioning a node in a 3-node cluster, so there's no node to
769779 // up-replicate the under-replicated ranges to.
770- {
771- _ , err := c .RunWithCapture (fmt .Sprintf ("node decommission --checks=skip --wait=none %d" , 2 ))
772- if err != nil {
773- t .Fatal (err )
774- }
775- }
780+ _ , err = c .RunWithCapture (fmt .Sprintf (
781+ "node decommission --checks=skip --wait=none --cluster-name=%s %d" ,
782+ tc .ClusterName (), 2 ,
783+ ))
784+ require .NoError (t , err )
776785
777786 // We use .Override() here instead of SET CLUSTER SETTING in SQL to
778787 // override the 1m15s minimum placed on the cluster setting. There
@@ -787,12 +796,13 @@ func TestPartialZip(t *testing.T) {
787796 datadriven .RunTest (t , datapathutils .TestDataPath (t , "zip" , "partial2" ),
788797 func (t * testing.T , td * datadriven.TestData ) string {
789798 f := func () string {
790- out , err := c .RunWithCapture ("debug zip --concurrency=1 --cpu-profile-duration=0 --validate-zip-file=false " + os . DevNull )
791- if err != nil {
792- t . Fatal ( err )
793- }
794-
799+ out , err := c .RunWithCapture (fmt .Sprintf (
800+ "debug zip --concurrency=1 --cpu-profile-duration=0 --validate-zip-file=false --cluster-name=%s %s" ,
801+ tc .ClusterName (), os .DevNull ,
802+ ))
803+ require .NoError (t , err )
795804 // Strip any non-deterministic messages.
805+ out = eraseClusterName (out , tc .ClusterName ())
796806 return eraseNonDeterministicZipOutput (out )
797807 }
798808
0 commit comments