Skip to content

Commit 30f9893

Browse files
committed
kubeadm: refactor the dry-run logic
The current dryrun client implementation is suboptimal and sparse. It has the following problems: - When an object CREATE or UPDATE reaches the default dryrun client the operation is a NO-OP, which means subsequent GET calls must fully emulate the object that exists in the store. - There are multiple implementations of a DryRunGetter interface, such as the one in init_dryrun.go, but there are no implementations for reset, upgrade, join. - There is a specific DryRunGetter that is backed by a real client in clientbacked_dryrun.go, but this is used for upgrade and does not work in conjunction with a fake client. This commit does the following changes: - Removes all existing *dryrun*.go implementations. - Adds a new DryRun implementation in dryrun.go that implements 3 clients - fake clientset, real clientset, real dynamic client. - The DryRun object uses the method chaining pattern. - Allows the user to opt in to real clients only if needed, by passing a real kubeconfig. By default only constructs a fake client. - The default reactor chain for the fake client always logs the object action, then for GET or LIST actions attempts to use the real dynamic client to get the object. If a real object does not exist it attempts to get the object from the fake object store. - The user can prepend or append reactors to the chain. - All known needed reactors for operations during init, join, reset, upgrade are added as methods of the DryRun struct. - Adds detailed unit tests for the DryRun struct and its methods, including reactors. Additional changes: - Use the new DryRun implementation in all command workflows - init, join, reset, upgrade. - Ensure that --dry-run works even if there is no active cluster by returning faked objects. For join, a faked cluster-info with a fake bootstrap token and CA are used.
1 parent 5973acc commit 30f9893

30 files changed

+1423
-990
lines changed

cmd/kubeadm/app/cmd/init.go

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ import (
2828

2929
"k8s.io/apimachinery/pkg/util/sets"
3030
clientset "k8s.io/client-go/kubernetes"
31+
"k8s.io/klog/v2"
3132

3233
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
3334
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
@@ -349,10 +350,7 @@ func newInitData(cmd *cobra.Command, args []string, initOptions *initOptions, ou
349350
// if dry running creates a temporary folder for saving kubeadm generated files
350351
dryRunDir := ""
351352
if initOptions.dryRun || cfg.DryRun {
352-
// the KUBEADM_INIT_DRYRUN_DIR environment variable allows overriding the dry-run temporary
353-
// directory from the command line. This makes it possible to run "kubeadm init" integration
354-
// tests without root.
355-
if dryRunDir, err = kubeadmconstants.CreateTempDirForKubeadm(os.Getenv("KUBEADM_INIT_DRYRUN_DIR"), "kubeadm-init-dryrun"); err != nil {
353+
if dryRunDir, err = kubeadmconstants.GetDryRunDir(kubeadmconstants.EnvVarInitDryRunDir, "kubeadm-init-dryrun", klog.Warningf); err != nil {
356354
return nil, errors.Wrap(err, "couldn't create a temporary directory")
357355
}
358356
}
@@ -502,12 +500,16 @@ func (d *initData) OutputWriter() io.Writer {
502500

503501
// getDryRunClient creates a fake client that answers some GET calls in order to be able to do the full init flow in dry-run mode.
504502
func getDryRunClient(d *initData) (clientset.Interface, error) {
505-
svcSubnetCIDR, err := kubeadmconstants.GetKubernetesServiceCIDR(d.cfg.Networking.ServiceSubnet)
506-
if err != nil {
507-
return nil, errors.Wrapf(err, "unable to get internal Kubernetes Service IP from the given service CIDR (%s)", d.cfg.Networking.ServiceSubnet)
503+
dryRun := apiclient.NewDryRun()
504+
if err := dryRun.WithKubeConfigFile(d.KubeConfigPath()); err != nil {
505+
return nil, err
508506
}
509-
dryRunGetter := apiclient.NewInitDryRunGetter(d.cfg.NodeRegistration.Name, svcSubnetCIDR.String())
510-
return apiclient.NewDryRunClient(dryRunGetter, os.Stdout), nil
507+
dryRun.WithDefaultMarshalFunction().
508+
WithWriter(os.Stdout).
509+
PrependReactor(dryRun.GetNodeReactor()).
510+
PrependReactor(dryRun.PatchNodeReactor())
511+
512+
return dryRun.FakeClient(), nil
511513
}
512514

513515
// Client returns a Kubernetes client to be used by kubeadm.

cmd/kubeadm/app/cmd/join.go

Lines changed: 71 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,8 @@ import (
4545
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
4646
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
4747
"k8s.io/kubernetes/cmd/kubeadm/app/discovery"
48+
"k8s.io/kubernetes/cmd/kubeadm/app/discovery/token"
49+
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
4850
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
4951
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
5052
)
@@ -464,7 +466,7 @@ func newJoinData(cmd *cobra.Command, args []string, opt *joinOptions, out io.Wri
464466
// if dry running, creates a temporary folder to save kubeadm generated files
465467
dryRunDir := ""
466468
if opt.dryRun || cfg.DryRun {
467-
if dryRunDir, err = kubeadmconstants.CreateTempDirForKubeadm("", "kubeadm-join-dryrun"); err != nil {
469+
if dryRunDir, err = kubeadmconstants.GetDryRunDir(kubeadmconstants.EnvVarJoinDryRunDir, "kubeadm-join-dryrun", klog.Warningf); err != nil {
468470
return nil, errors.Wrap(err, "couldn't create a temporary directory on dryrun")
469471
}
470472
}
@@ -535,8 +537,19 @@ func (j *joinData) TLSBootstrapCfg() (*clientcmdapi.Config, error) {
535537
if j.tlsBootstrapCfg != nil {
536538
return j.tlsBootstrapCfg, nil
537539
}
540+
541+
var (
542+
client clientset.Interface
543+
err error
544+
)
545+
if j.dryRun {
546+
client, err = j.Client()
547+
if err != nil {
548+
return nil, errors.Wrap(err, "could not create a client for TLS bootstrap")
549+
}
550+
}
538551
klog.V(1).Infoln("[preflight] Discovering cluster-info")
539-
tlsBootstrapCfg, err := discovery.For(j.cfg)
552+
tlsBootstrapCfg, err := discovery.For(client, j.cfg)
540553
j.tlsBootstrapCfg = tlsBootstrapCfg
541554
return tlsBootstrapCfg, err
542555
}
@@ -550,19 +563,58 @@ func (j *joinData) InitCfg() (*kubeadmapi.InitConfiguration, error) {
550563
return nil, err
551564
}
552565
klog.V(1).Infoln("[preflight] Fetching init configuration")
553-
initCfg, err := fetchInitConfigurationFromJoinConfiguration(j.cfg, j.tlsBootstrapCfg)
566+
var client clientset.Interface
567+
if j.dryRun {
568+
var err error
569+
client, err = j.Client()
570+
if err != nil {
571+
return nil, errors.Wrap(err, "could not get dry-run client for fetching InitConfiguration")
572+
}
573+
}
574+
initCfg, err := fetchInitConfigurationFromJoinConfiguration(j.cfg, client, j.tlsBootstrapCfg)
554575
j.initCfg = initCfg
555576
return initCfg, err
556577
}
557578

558579
// Client returns the Client for accessing the cluster with the identity defined in admin.conf.
559580
func (j *joinData) Client() (clientset.Interface, error) {
560-
if j.client != nil {
581+
pathAdmin := filepath.Join(j.KubeConfigDir(), kubeadmconstants.AdminKubeConfigFileName)
582+
583+
if j.dryRun {
584+
dryRun := apiclient.NewDryRun()
585+
// For the dynamic dry-run client use this kubeconfig only if it exists.
586+
// That would happen presumably after TLS bootstrap.
587+
if _, err := os.Stat(pathAdmin); err == nil {
588+
if err := dryRun.WithKubeConfigFile(pathAdmin); err != nil {
589+
return nil, err
590+
}
591+
} else if j.tlsBootstrapCfg != nil {
592+
if err := dryRun.WithKubeConfig(j.tlsBootstrapCfg); err != nil {
593+
return nil, err
594+
}
595+
} else if j.cfg.Discovery.BootstrapToken != nil {
596+
insecureConfig := token.BuildInsecureBootstrapKubeConfig(j.cfg.Discovery.BootstrapToken.APIServerEndpoint)
597+
resetConfig, err := clientcmd.NewDefaultClientConfig(*insecureConfig, &clientcmd.ConfigOverrides{}).ClientConfig()
598+
if err != nil {
599+
return nil, errors.Wrap(err, "failed to create API client configuration from kubeconfig")
600+
}
601+
if err := dryRun.WithRestConfig(resetConfig); err != nil {
602+
return nil, err
603+
}
604+
}
605+
606+
dryRun.WithDefaultMarshalFunction().
607+
WithWriter(os.Stdout).
608+
AppendReactor(dryRun.GetClusterInfoReactor()).
609+
AppendReactor(dryRun.GetKubeadmConfigReactor()).
610+
AppendReactor(dryRun.GetKubeProxyConfigReactor()).
611+
AppendReactor(dryRun.GetKubeletConfigReactor())
612+
613+
j.client = dryRun.FakeClient()
561614
return j.client, nil
562615
}
563-
path := filepath.Join(j.KubeConfigDir(), kubeadmconstants.AdminKubeConfigFileName)
564616

565-
client, err := kubeconfigutil.ClientSetFromFile(path)
617+
client, err := kubeconfigutil.ClientSetFromFile(pathAdmin)
566618
if err != nil {
567619
return nil, errors.Wrap(err, "[preflight] couldn't create Kubernetes client")
568620
}
@@ -593,10 +645,18 @@ func (j *joinData) PatchesDir() string {
593645
}
594646

595647
// fetchInitConfigurationFromJoinConfiguration retrieves the init configuration from a join configuration, performing the discovery
596-
func fetchInitConfigurationFromJoinConfiguration(cfg *kubeadmapi.JoinConfiguration, tlsBootstrapCfg *clientcmdapi.Config) (*kubeadmapi.InitConfiguration, error) {
597-
// Retrieves the kubeadm configuration
648+
func fetchInitConfigurationFromJoinConfiguration(cfg *kubeadmapi.JoinConfiguration, client clientset.Interface, tlsBootstrapCfg *clientcmdapi.Config) (*kubeadmapi.InitConfiguration, error) {
649+
var err error
650+
598651
klog.V(1).Infoln("[preflight] Retrieving KubeConfig objects")
599-
initConfiguration, err := fetchInitConfiguration(tlsBootstrapCfg)
652+
if client == nil {
653+
// creates a client to access the cluster using the bootstrap token identity
654+
client, err = kubeconfigutil.ToClientSet(tlsBootstrapCfg)
655+
if err != nil {
656+
return nil, errors.Wrap(err, "unable to access the cluster")
657+
}
658+
}
659+
initConfiguration, err := fetchInitConfiguration(client)
600660
if err != nil {
601661
return nil, err
602662
}
@@ -618,15 +678,8 @@ func fetchInitConfigurationFromJoinConfiguration(cfg *kubeadmapi.JoinConfigurati
618678
}
619679

620680
// fetchInitConfiguration reads the cluster configuration from the kubeadm-admin configMap
621-
func fetchInitConfiguration(tlsBootstrapCfg *clientcmdapi.Config) (*kubeadmapi.InitConfiguration, error) {
622-
// creates a client to access the cluster using the bootstrap token identity
623-
tlsClient, err := kubeconfigutil.ToClientSet(tlsBootstrapCfg)
624-
if err != nil {
625-
return nil, errors.Wrap(err, "unable to access the cluster")
626-
}
627-
628-
// Fetches the init configuration
629-
initConfiguration, err := configutil.FetchInitConfigurationFromCluster(tlsClient, nil, "preflight", true, false)
681+
func fetchInitConfiguration(client clientset.Interface) (*kubeadmapi.InitConfiguration, error) {
682+
initConfiguration, err := configutil.FetchInitConfigurationFromCluster(client, nil, "preflight", true, false)
630683
if err != nil {
631684
return nil, errors.Wrap(err, "unable to fetch the kubeadm-config ConfigMap")
632685
}

cmd/kubeadm/app/cmd/phases/join/kubelet.go

Lines changed: 14 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import (
3030
apierrors "k8s.io/apimachinery/pkg/api/errors"
3131
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
3232
"k8s.io/apimachinery/pkg/util/wait"
33+
clientset "k8s.io/client-go/kubernetes"
3334
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
3435
certutil "k8s.io/client-go/util/cert"
3536
"k8s.io/klog/v2"
@@ -150,11 +151,18 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) {
150151
}()
151152
}
152153

153-
// Create the bootstrap client before we possibly overwrite the server address
154-
// for ControlPlaneKubeletLocalMode.
155-
bootstrapClient, err := kubeconfigutil.ToClientSet(tlsBootstrapCfg)
156-
if err != nil {
157-
return errors.Errorf("could not create client from bootstrap kubeconfig")
154+
var client clientset.Interface
155+
// If dry-run, use the client from joinData, else create a new bootstrap client
156+
if data.DryRun() {
157+
client, err = data.Client()
158+
if err != nil {
159+
return err
160+
}
161+
} else {
162+
client, err = kubeconfigutil.ToClientSet(tlsBootstrapCfg)
163+
if err != nil {
164+
return errors.Errorf("could not create client from bootstrap kubeconfig")
165+
}
158166
}
159167

160168
if features.Enabled(initCfg.FeatureGates, features.ControlPlaneKubeletLocalMode) {
@@ -204,7 +212,7 @@ func runKubeletStartJoinPhase(c workflow.RunData) (returnErr error) {
204212
// A new Node with the same name as an existing control-plane Node can cause undefined
205213
// behavior and ultimately control-plane failure.
206214
klog.V(1).Infof("[kubelet-start] Checking for an existing Node in the cluster with name %q and status %q", nodeName, v1.NodeReady)
207-
node, err := bootstrapClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
215+
node, err := client.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
208216
if err != nil && !apierrors.IsNotFound(err) {
209217
return errors.Wrapf(err, "cannot get Node %q", nodeName)
210218
}

cmd/kubeadm/app/cmd/phases/reset/cleanupnode.go

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ func runCleanupNode(c workflow.RunData) error {
7575
klog.Warningln("[reset] Please ensure kubelet is stopped manually")
7676
}
7777
} else {
78-
fmt.Println("[reset] Would stop the kubelet service")
78+
fmt.Println("[dryrun] Would stop the kubelet service")
7979
}
8080
}
8181

@@ -96,7 +96,7 @@ func runCleanupNode(c workflow.RunData) error {
9696
dirsToClean = append(dirsToClean, kubeletRunDirectory)
9797
}
9898
} else {
99-
fmt.Printf("[reset] Would unmount mounted directories in %q\n", kubeadmconstants.KubeletRunDirectory)
99+
fmt.Printf("[dryrun] Would unmount mounted directories in %q\n", kubeadmconstants.KubeletRunDirectory)
100100
}
101101

102102
if !r.DryRun() {
@@ -105,7 +105,7 @@ func runCleanupNode(c workflow.RunData) error {
105105
klog.Warningf("[reset] Failed to remove containers: %v\n", err)
106106
}
107107
} else {
108-
fmt.Println("[reset] Would remove Kubernetes-managed containers")
108+
fmt.Println("[dryrun] Would remove Kubernetes-managed containers")
109109
}
110110

111111
// Remove contents from the config and pki directories
@@ -115,7 +115,7 @@ func runCleanupNode(c workflow.RunData) error {
115115

116116
dirsToClean = append(dirsToClean, certsDir)
117117
if r.CleanupTmpDir() {
118-
tempDir := path.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.TempDirForKubeadm)
118+
tempDir := path.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.TempDir)
119119
dirsToClean = append(dirsToClean, tempDir)
120120
}
121121
resetConfigDir(kubeadmconstants.KubernetesDir, dirsToClean, r.DryRun())
@@ -127,7 +127,7 @@ func runCleanupNode(c workflow.RunData) error {
127127
klog.Warningf("[reset] Failed to remove users and groups: %v\n", err)
128128
}
129129
} else {
130-
fmt.Println("[reset] Would remove users and groups created for rootless control-plane")
130+
fmt.Println("[dryrun] Would remove users and groups created for rootless control-plane")
131131
}
132132
}
133133

@@ -156,7 +156,7 @@ func resetConfigDir(configPathDir string, dirsToClean []string, isDryRun bool) {
156156
}
157157
}
158158
} else {
159-
fmt.Printf("[reset] Would delete contents of directories: %v\n", dirsToClean)
159+
fmt.Printf("[dryrun] Would delete contents of directories: %v\n", dirsToClean)
160160
}
161161

162162
filesToClean := []string{
@@ -176,7 +176,7 @@ func resetConfigDir(configPathDir string, dirsToClean []string, isDryRun bool) {
176176
}
177177
}
178178
} else {
179-
fmt.Printf("[reset] Would delete files: %v\n", filesToClean)
179+
fmt.Printf("[dryrun] Would delete files: %v\n", filesToClean)
180180
}
181181
}
182182

cmd/kubeadm/app/cmd/phases/reset/cleanupnode_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -185,7 +185,7 @@ func TestConfigDirCleaner(t *testing.T) {
185185
dirsToClean := []string{
186186
filepath.Join(tmpDir, test.resetDir),
187187
filepath.Join(tmpDir, kubeadmconstants.ManifestsSubDirName),
188-
filepath.Join(tmpDir, kubeadmconstants.TempDirForKubeadm),
188+
filepath.Join(tmpDir, kubeadmconstants.TempDir),
189189
}
190190
resetConfigDir(tmpDir, dirsToClean, false)
191191

cmd/kubeadm/app/cmd/phases/reset/removeetcdmember.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,8 @@ func runRemoveETCDMemberPhase(c workflow.RunData) error {
7474
}
7575
}
7676
} else {
77-
fmt.Println("[reset] Would remove the etcd member on this node from the etcd cluster")
78-
fmt.Printf("[reset] Would delete contents of the etcd data directory: %v\n", etcdDataDir)
77+
fmt.Println("[dryrun] Would remove the etcd member on this node from the etcd cluster")
78+
fmt.Printf("[dryrun] Would delete contents of the etcd data directory: %v\n", etcdDataDir)
7979
}
8080
}
8181
// This could happen if the phase `cleanup-node` is run before the `remove-etcd-member`.

cmd/kubeadm/app/cmd/reset.go

Lines changed: 24 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ import (
2020
"errors"
2121
"fmt"
2222
"io"
23+
"os"
2324
"path"
2425

2526
"github.com/lithammer/dedent"
@@ -39,7 +40,9 @@ import (
3940
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
4041
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
4142
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
43+
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
4244
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
45+
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
4346
utilruntime "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime"
4447
)
4548

@@ -104,7 +107,10 @@ func newResetData(cmd *cobra.Command, opts *resetOptions, in io.Reader, out io.W
104107
return nil, err
105108
}
106109

107-
var initCfg *kubeadmapi.InitConfiguration
110+
var (
111+
initCfg *kubeadmapi.InitConfiguration
112+
client clientset.Interface
113+
)
108114

109115
// Either use the config file if specified, or convert public kubeadm API to the internal ResetConfiguration and validates cfg.
110116
resetCfg, err := configutil.LoadOrDefaultResetConfiguration(opts.cfgPath, opts.externalcfg, configutil.LoadOrDefaultConfigurationOptions{
@@ -115,7 +121,21 @@ func newResetData(cmd *cobra.Command, opts *resetOptions, in io.Reader, out io.W
115121
return nil, err
116122
}
117123

118-
client, err := cmdutil.GetClientSet(opts.kubeconfigPath, false)
124+
dryRunFlag := cmdutil.ValueFromFlagsOrConfig(cmd.Flags(), options.DryRun, resetCfg.DryRun, opts.externalcfg.DryRun).(bool)
125+
if dryRunFlag {
126+
dryRun := apiclient.NewDryRun().WithDefaultMarshalFunction().WithWriter(os.Stdout)
127+
dryRun.AppendReactor(dryRun.GetKubeadmConfigReactor()).
128+
AppendReactor(dryRun.GetKubeletConfigReactor()).
129+
AppendReactor(dryRun.GetKubeProxyConfigReactor())
130+
client = dryRun.FakeClient()
131+
_, err = os.Stat(opts.kubeconfigPath)
132+
if err == nil {
133+
err = dryRun.WithKubeConfigFile(opts.kubeconfigPath)
134+
}
135+
} else {
136+
client, err = kubeconfigutil.ClientSetFromFile(opts.kubeconfigPath)
137+
}
138+
119139
if err == nil {
120140
klog.V(1).Infof("[reset] Loaded client set from kubeconfig file: %s", opts.kubeconfigPath)
121141
initCfg, err = configutil.FetchInitConfigurationFromCluster(client, nil, "reset", false, false)
@@ -162,7 +182,7 @@ func newResetData(cmd *cobra.Command, opts *resetOptions, in io.Reader, out io.W
162182
outputWriter: out,
163183
cfg: initCfg,
164184
resetCfg: resetCfg,
165-
dryRun: cmdutil.ValueFromFlagsOrConfig(cmd.Flags(), options.DryRun, resetCfg.DryRun, opts.externalcfg.DryRun).(bool),
185+
dryRun: dryRunFlag,
166186
forceReset: cmdutil.ValueFromFlagsOrConfig(cmd.Flags(), options.ForceReset, resetCfg.Force, opts.externalcfg.Force).(bool),
167187
cleanupTmpDir: cmdutil.ValueFromFlagsOrConfig(cmd.Flags(), options.CleanupTmpDir, resetCfg.CleanupTmpDir, opts.externalcfg.CleanupTmpDir).(bool),
168188
}, nil
@@ -184,7 +204,7 @@ func AddResetFlags(flagSet *flag.FlagSet, resetOptions *resetOptions) {
184204
)
185205
flagSet.BoolVar(
186206
&resetOptions.externalcfg.CleanupTmpDir, options.CleanupTmpDir, resetOptions.externalcfg.CleanupTmpDir,
187-
fmt.Sprintf("Cleanup the %q directory", path.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.TempDirForKubeadm)),
207+
fmt.Sprintf("Cleanup the %q directory", path.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.TempDir)),
188208
)
189209
options.AddKubeConfigFlag(flagSet, &resetOptions.kubeconfigPath)
190210
options.AddConfigFlag(flagSet, &resetOptions.cfgPath)

cmd/kubeadm/app/cmd/reset_test.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -219,6 +219,9 @@ func TestNewResetData(t *testing.T) {
219219
resetOptions := newResetOptions()
220220
cmd := newCmdReset(nil, nil, resetOptions)
221221

222+
// make sure all cases use dry-run as we are not constructing a kubeconfig
223+
tc.flags[options.DryRun] = "true"
224+
222225
// sets cmd flags (that will be reflected on the reset options)
223226
for f, v := range tc.flags {
224227
cmd.Flags().Set(f, v)

0 commit comments

Comments
 (0)