diff --git a/minikube/lib/wait_validator.go b/minikube/lib/wait_validator.go
new file mode 100644
index 0000000..66c0d3f
--- /dev/null
+++ b/minikube/lib/wait_validator.go
@@ -0,0 +1,65 @@
+package lib
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+var standardOptions = []string{
+	"apiserver",
+	"system_pods",
+	"default_sa",
+	"apps_running",
+	"node_ready",
+	"kubelet",
+}
+
+var specialOptions = []string{
+	"all",
+	"none",
+	"true",
+	"false",
+}
+
+func ValidateWait(v map[string]bool) error {
+	var invalidOptions []string
+
+	for key := range v {
+		if !contains(standardOptions, key) || contains(specialOptions, key) {
+			invalidOptions = append(invalidOptions, key)
+		}
+	}
+
+	if len(invalidOptions) > 0 {
+		sort.Strings(invalidOptions) // map iteration order is random; sort for a deterministic message
+		return fmt.Errorf("invalid wait option(s): %s", strings.Join(invalidOptions, ", "))
+	}
+
+	return nil
+}
+
+func contains(slice []string, item string) bool {
+	for _, s := range slice {
+		if s == item {
+			return true
+		}
+	}
+	return false
+}
+
+func ResolveSpecialWaitOptions(input map[string]bool) map[string]bool {
+	if input["all"] || input["true"] {
+		result := make(map[string]bool)
+		for _, opt := range standardOptions {
+			result[opt] = true
+		}
+		return result
+	}
+
+	if input["none"] || input["false"] {
+		return make(map[string]bool)
+	}
+
+	return input
+}
diff --git a/minikube/lib/wait_validator_test.go b/minikube/lib/wait_validator_test.go
new file mode 100644
index 0000000..eb5d452
--- /dev/null
+++ b/minikube/lib/wait_validator_test.go
@@ -0,0 +1,134 @@
+package lib
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestValidateWait(t *testing.T) {
+	tests := []struct {
+		name          string
+		input         map[string]bool
+		expectedError string
+	}{
+		{
+			name:          "Valid options",
+			input:         map[string]bool{"apiserver": true, "system_pods": true},
+			expectedError: "",
+		},
+		{
+			name:          "Invalid option",
+			input:         map[string]bool{"invalid_option": true},
+			expectedError: "invalid wait option(s): invalid_option",
+		},
+		{
+			name:          "Multiple invalid options",
+			input:         map[string]bool{"invalid1": true, "invalid2": true, "apiserver": true},
+			expectedError: "invalid wait option(s): invalid1, invalid2",
+		},
+		{
+			name:          "Special option",
+			input:         map[string]bool{"all": true},
+			expectedError: "invalid wait option(s): all",
+		},
+		{
+			name:          "Empty input",
+			input:         map[string]bool{},
+			expectedError: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := ValidateWait(tt.input)
+			if (err == nil && tt.expectedError != "") || (err != nil && err.Error() != tt.expectedError) {
+				t.Errorf("ValidateWait() error = %v, expectedError %v", err, tt.expectedError)
+			}
+		})
+	}
+}
+
+func TestResolveSpecialWaitOptions(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    map[string]bool
+		expected map[string]bool
+	}{
+		{
+			name:     "All true",
+			input:    map[string]bool{"all": true},
+			expected: map[string]bool{"apiserver": true, "system_pods": true, "default_sa": true, "apps_running": true, "node_ready": true, "kubelet": true},
+		},
+		{
+			name:     "True",
+			input:    map[string]bool{"true": true},
+			expected: map[string]bool{"apiserver": true, "system_pods": true, "default_sa": true, "apps_running": true, "node_ready": true, "kubelet": true},
+		},
+		{
+			name:     "None",
+			input:    map[string]bool{"none": true},
+			expected: map[string]bool{},
+		},
+		{
+			name:     "False",
+			input:    map[string]bool{"false": true},
+			expected: map[string]bool{},
+		},
+		{
+			name:     "Standard options",
+			input:    map[string]bool{"apiserver": true, "system_pods": true},
+			expected: map[string]bool{"apiserver": true, "system_pods": true},
+		},
+		{
+			name:     "Empty input",
+			input:    map[string]bool{},
+			expected: map[string]bool{},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := ResolveSpecialWaitOptions(tt.input)
+			if !reflect.DeepEqual(result, tt.expected) {
+				t.Errorf("ResolveSpecialWaitOptions() = %v, want %v", result, tt.expected)
+			}
+		})
+	}
+}
+
+func TestContains(t *testing.T) {
+	tests := []struct {
+		name     string
+		slice    []string
+		item     string
+		expected bool
+	}{
+		{
+			name:     "Item present",
+			slice:    []string{"a", "b", "c"},
+			item:     "b",
+			expected: true,
+		},
+		{
+			name:     "Item not present",
+			slice:    []string{"a", "b", "c"},
+			item:     "d",
+			expected: false,
+		},
+		{
+			name:     "Empty slice",
+			slice:    []string{},
+			item:     "a",
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := contains(tt.slice, tt.item)
+			if result != tt.expected {
+				t.Errorf("contains() = %v, want %v", result, tt.expected)
+			}
+		})
+	}
+}
diff --git a/minikube/resource_cluster.go b/minikube/resource_cluster.go
index 1376a61..13ad323 100644
--- a/minikube/resource_cluster.go
+++ b/minikube/resource_cluster.go
@@ -75,8 +75,8 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, m interf
 	if d.HasChange("addons") {
 		config := client.GetConfig()
 		oldAddons, newAddons := d.GetChange("addons")
-		oldAddonStrings := getAddons(oldAddons.(*schema.Set))
-		newAddonStrings := getAddons(newAddons.(*schema.Set))
+		oldAddonStrings := state_utils.SetToSlice(oldAddons.(*schema.Set))
+		newAddonStrings := state_utils.SetToSlice(newAddons.(*schema.Set))
 
 		client.SetConfig(lib.MinikubeClientConfig{
 			ClusterConfig: config.ClusterConfig,
@@ -248,7 +248,7 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste
 		addons = &schema.Set{}
 	}
 
-	addonStrings := getAddons(addons.(*schema.Set))
+	addonStrings := state_utils.SetToSlice(addons.(*schema.Set))
 
 	defaultIsos, ok := d.GetOk("iso_url")
 	if !ok {
@@ -357,6 +357,19 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste
 		return nil, errors.New("at least 3 nodes is required for high availability")
 	}
 
+	vcs := state_utils.SetToSlice(d.Get("wait").(*schema.Set))
+	vc := make(map[string]bool)
+	for _, c := range vcs {
+		vc[c] = true
+	}
+
+	err = lib.ValidateWait(vc)
+	if err != nil {
+		return nil, err
+	}
+
+	vc = lib.ResolveSpecialWaitOptions(vc)
+
 	cc := config.ClusterConfig{
 		Addons:                addonConfig,
 		APIServerPort:         d.Get("apiserver_port").(int),
@@ -422,6 +435,7 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste
 		GPUs:                  d.Get("gpus").(string),
 		SocketVMnetPath:       d.Get("socket_vmnet_path").(string),
 		SocketVMnetClientPath: d.Get("socket_vmnet_client_path").(string),
+		VerifyComponents:      vc,
 	}
 
 	clusterClient.SetConfig(lib.MinikubeClientConfig{
@@ -441,15 +455,3 @@ func initialiseMinikubeClient(d *schema.ResourceData, m interface{}) (lib.Cluste
 
 	return clusterClient, nil
 }
-
-func getAddons(addons *schema.Set) []string {
-	addonStrings := make([]string, addons.Len())
-	addonObjects := addons.List()
-	for i, v := range addonObjects {
-		addonStrings[i] = v.(string)
-	}
-
-	sort.Strings(addonStrings) //to ensure consistency with TF state
-
-	return addonStrings
-}
diff --git a/minikube/resource_cluster_test.go b/minikube/resource_cluster_test.go
index 314f8f9..4557b6e 100644
--- a/minikube/resource_cluster_test.go
+++ b/minikube/resource_cluster_test.go
@@ -80,6 +80,17 @@ func TestClusterHA(t *testing.T) {
 			},
 		},
 	})
 }
+func TestClusterWait(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		IsUnitTest: true,
+		Providers:  map[string]*schema.Provider{"minikube": NewProvider(mockSuccess(mockClusterClientProperties{t, "TestClusterCreationWait", 1, 0}))},
+		Steps: []resource.TestStep{
+			{
+				Config: testUnitClusterWaitConfig("some_driver", "TestClusterCreationWait"),
+			},
+		},
+	})
+}
 func TestClusterCreation_Docker(t *testing.T) {
 	resource.Test(t, resource.TestCase{
@@ -226,6 +237,21 @@ func TestClusterCreation_HAControlPlane(t *testing.T) {
 	})
 }
 
+func TestClusterCreation_Wait(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		Providers:    map[string]*schema.Provider{"minikube": Provider()},
+		CheckDestroy: verifyDelete,
+		Steps: []resource.TestStep{
+			{
+				Config: testAcceptanceClusterConfig_Wait("docker", "TestClusterCreationDocker"),
+				Check: resource.ComposeTestCheckFunc(
+					testPropertyExists("minikube_cluster.new", "TestClusterCreationDocker"),
+				),
+			},
+		},
+	})
+}
+
 func TestClusterCreation_Hyperkit(t *testing.T) {
 	if runtime.GOOS != "darwin" {
 		t.Skip("Hyperkit is only supported on macOS")
@@ -522,6 +548,17 @@ func testUnitClusterHAConfig(driver string, clusterName string) string {
 	`, driver, clusterName)
 }
 
+func testUnitClusterWaitConfig(driver string, clusterName string) string {
+	return fmt.Sprintf(`
+	resource "minikube_cluster" "new" {
+		driver = "%s"
+		cluster_name = "%s"
+
+		wait = [ "apiserver" ]
+	}
+	`, driver, clusterName)
+}
+
 func testUnitClusterConfig_Update(driver string, clusterName string) string {
 	return fmt.Sprintf(`
 	resource "minikube_cluster" "new" {
@@ -696,6 +733,21 @@ func testAcceptanceClusterConfig_HAControlPlane(driver string, clusterName strin
 	`, driver, clusterName)
 }
 
+func testAcceptanceClusterConfig_Wait(driver string, clusterName string) string {
+	return fmt.Sprintf(`
+	resource "minikube_cluster" "new" {
+		driver = "%s"
+		cluster_name = "%s"
+		cpus = 2
+		memory = "6000GiB"
+
+		wait = [
+			"apps_running"
+		]
+	}
+	`, driver, clusterName)
+}
+
 func verifyDelete(s *terraform.State) error {
 	for _, rs := range s.RootModule().Resources {
 		if rs.Type != "minikube_cluster" {
diff --git a/minikube/state_utils/slice.go b/minikube/state_utils/slice.go
index 6f1f8b2..08384e9 100644
--- a/minikube/state_utils/slice.go
+++ b/minikube/state_utils/slice.go
@@ -38,3 +38,15 @@ func ReadSliceState(slice interface{}) []string {
 
 	return stringSlice
 }
+
+func SetToSlice(s *schema.Set) []string {
+	ss := make([]string, s.Len())
+	so := s.List()
+	for i, v := range so {
+		ss[i] = v.(string)
+	}
+
+	sort.Strings(ss) //to ensure consistency with TF state
+
+	return ss
+}
diff --git a/minikube/state_utils/slice_test.go b/minikube/state_utils/slice_test.go
index 8aa5771..3c50e73 100644
--- a/minikube/state_utils/slice_test.go
+++ b/minikube/state_utils/slice_test.go
@@ -86,3 +86,41 @@ func TestReadSliceState(t *testing.T) {
 		})
 	}
 }
+
+func TestSetToSlice(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    *schema.Set
+		expected []string
+	}{
+		{
+			name:     "Empty set",
+			input:    schema.NewSet(schema.HashString, []interface{}{}),
+			expected: []string{},
+		},
+		{
+			name:     "Set with single item",
+			input:    schema.NewSet(schema.HashString, []interface{}{"apple"}),
+			expected: []string{"apple"},
+		},
+		{
+			name:     "Set with multiple items",
+			input:    schema.NewSet(schema.HashString, []interface{}{"banana", "apple", "cherry"}),
+			expected: []string{"apple", "banana", "cherry"},
+		},
+		{
+			name:     "Set with duplicate items",
+			input:    schema.NewSet(schema.HashString, []interface{}{"apple", "banana", "apple", "cherry"}),
+			expected: []string{"apple", "banana", "cherry"},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := SetToSlice(tt.input)
+			if !reflect.DeepEqual(result, tt.expected) {
+				t.Errorf("SetToSlice() = %v, want %v", result, tt.expected)
+			}
+		})
+	}
+}