@@ -1949,6 +1949,143 @@ def test_aks_enable_addon_with_azurekeyvaultsecretsprovider(
19491949 ],
19501950 )
19511951
1952+ @AllowLargeResponse()
1953+ @AKSCustomResourceGroupPreparer(
1954+ random_name_length=17, name_prefix="clitest", location="westus2"
1955+ )
1956+ def test_aks_create_normal_cluster_then_add_managed_system_pool(
1957+ self, resource_group, resource_group_location
1958+ ):
1959+ aks_name = self.create_random_name("cliakstest", 16)
1960+ nodepool_name = self.create_random_name("msnp", 12)
1961+ self.kwargs.update(
1962+ {
1963+ "resource_group": resource_group,
1964+ "name": aks_name,
1965+ "nodepool_name": nodepool_name,
1966+ "ssh_key_value": self.generate_ssh_keys(),
1967+ }
1968+ )
1969+
1970+ # Create a normal cluster without a managed system pool
1971+ create_cmd = (
1972+ "aks create --resource-group={resource_group} --name={name} "
1973+ "-c 1 "
1974+ "--ssh-key-value={ssh_key_value} -o json"
1975+ )
1976+ self.cmd(
1977+ create_cmd,
1978+ checks=[
1979+ self.check("provisioningState", "Succeeded"),
1980+ self.check("agentPoolProfiles[0].mode", "System"),
1981+ ],
1982+ )
1983+
1984+ # Adding a ManagedSystem node pool to the existing cluster should succeed
1985+ add_nodepool_cmd = (
1986+ "aks nodepool add --resource-group={resource_group} --cluster-name={name} "
1987+ "--name={nodepool_name} --mode ManagedSystem --node-count 1"
1988+ )
1989+ self.cmd(
1990+ add_nodepool_cmd,
1991+ checks=[
1992+ self.check("mode", "ManagedSystem"),
1993+ self.check("provisioningState", "Succeeded"),
1994+ ],
1995+ )
1996+
1997+ # Verify that the cluster now has both a normal System pool and a ManagedSystem pool
1998+ show_cmd = "aks show --resource-group={resource_group} --name={name} -o json"
1999+ cluster_info = self.cmd(show_cmd).get_output_in_json()
2000+
2001+ # Check that we have two node pools with different modes
2002+ agent_pools = cluster_info["agentPoolProfiles"]
2003+ modes = [pool["mode"] for pool in agent_pools]
2004+ assert "System" in modes, "Should have a normal System pool"
2005+ assert "ManagedSystem" in modes, "Should have a ManagedSystem pool"
2006+
2007+
2008+ @AllowLargeResponse()
2009+ @AKSCustomResourceGroupPreparer(
2010+ random_name_length=17, name_prefix="clitest", location="westus2"
2011+ )
2012+ def test_aks_create_with_managed_system_pool_multiple_fails(
2013+ self, resource_group, resource_group_location
2014+ ):
2015+ aks_name = self.create_random_name("cliakstest", 16)
2016+ nodepool_name = self.create_random_name("np", 12)
2017+ self.kwargs.update(
2018+ {
2019+ "resource_group": resource_group,
2020+ "name": aks_name,
2021+ "nodepool_name": nodepool_name,
2022+ "ssh_key_value": self.generate_ssh_keys(),
2023+ }
2024+ )
2025+
2026+ # Create a cluster with a managed system pool
2027+ create_cmd = (
2028+ "aks create --resource-group={resource_group} --name={name} "
2029+ "-c 1 "
2030+ "--enable-managed-system-pool "
2031+ "--ssh-key-value={ssh_key_value} -o json"
2032+ )
2033+ self.cmd(
2034+ create_cmd,
2035+ checks=[
2036+ self.check("provisioningState", "Succeeded"),
2037+ self.check("agentPoolProfiles[0].mode", "ManagedSystem"),
2038+ ],
2039+ )
2040+
2041+ # Attempting to add another ManagedSystem node pool should fail
2042+ add_nodepool_cmd = (
2043+ "aks nodepool add --resource-group={resource_group} --cluster-name={name} "
2044+ "--name={nodepool_name} --mode ManagedSystem --node-count 1"
2045+ )
2046+
2047+ # This should fail because only one ManagedSystem pool is allowed per cluster
2048+ with self.assertRaisesRegex(
2049+ (CLIError, ClientRequestError, HttpResponseError),
2050+ "only.*one.*ManagedSystem.*pool.*allowed|ManagedSystem.*pool.*already.*exists|cannot.*add.*multiple.*ManagedSystem"
2051+ ):
2052+ self.cmd(add_nodepool_cmd)
2053+
2054+ # Updating the ManagedSystem pool to a normal System pool should succeed
2055+ update_nodepool_cmd = (
2056+ "aks nodepool update --resource-group={resource_group} --cluster-name={name} "
2057+ "--name=nodepool1 --mode System"
2058+ )
2059+ self.cmd(
2060+ update_nodepool_cmd,
2061+ checks=[
2062+ self.check("mode", "System"),
2063+ ],
2064+ )
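2065+ # Create a second cluster with --enable-managed-system-pool and verify the ManagedSystem pool uses the VirtualMachines type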
2065+ aks_name = self.create_random_name("cliakstest", 16)
2066+ self.kwargs.update(
2067+ {
2068+ "resource_group": resource_group,
2069+ "name": aks_name,
2070+ "ssh_key_value": self.generate_ssh_keys(),
2071+ }
2072+ )
2073+
2074+ create_cmd = (
2075+ "aks create --resource-group={resource_group} --name={name} "
2076+ "-c 1 "
2077+ "--enable-managed-system-pool "
2078+ "--ssh-key-value={ssh_key_value} -o json"
2079+ )
2080+ self.cmd(
2081+ create_cmd,
2082+ checks=[
2083+ self.check("provisioningState", "Succeeded"),
2084+ self.check("agentPoolProfiles[0].mode", "ManagedSystem"),
2085+ self.check("agentPoolProfiles[0].type", "VirtualMachines"),
2086+ ],
2087+ )
2088+
19522089 @AllowLargeResponse()
19532090 @AKSCustomResourceGroupPreparer(
19542091 random_name_length=17, name_prefix="clitest", location="westus2"
@@ -2647,7 +2784,7 @@ def test_aks_nodepool_add_with_ossku_windows2022(
26472784 "aks delete -g {resource_group} -n {name} --yes --no-wait",
26482785 checks=[self.is_empty()],
26492786 )
2650-
2787+
26512788 @AllowLargeResponse()
26522789 @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus')
26532790 def test_aks_nodepool_add_with_ossku_ubuntu2204(self, resource_group, resource_group_location):
@@ -3165,7 +3302,7 @@ def test_aks_nodepool_undrainable_node_behavior(
31653302 checks=[self.is_empty()],
31663303 )
31673304
3168-
3305+
31693306 @AllowLargeResponse()
31703307 @AKSCustomResourceGroupPreparer(
31713308 random_name_length=17, name_prefix="clitest", location="westus2"
@@ -7241,7 +7378,7 @@ def test_aks_maintenanceconfiguration(
72417378 "aks delete -g {resource_group} -n {name} --yes --no-wait",
72427379 checks=[self.is_empty()],
72437380 )
7244-
7381+
72457382 @AllowLargeResponse()
72467383 @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='uksouth')
72477384 def test_aks_managed_namespace(self, resource_group, resource_group_location):
@@ -7294,21 +7431,21 @@ def test_aks_managed_namespace(self, resource_group, resource_group_location):
72947431 list_namespace_full_cmd = (
72957432 "aks namespace list --resource-group={resource_group} --cluster-name={resource_name} -o json"
72967433 )
7297-
7434+
72987435 namespace_list = self.cmd(list_namespace_full_cmd).get_output_in_json()
72997436 assert len(namespace_list) > 0
73007437
73017438 list_namespace_with_resource_group_cmd = (
73027439 "aks namespace list --resource-group={resource_group} -o json"
73037440 )
7304-
7441+
73057442 namespace_list = self.cmd(list_namespace_with_resource_group_cmd).get_output_in_json()
73067443 assert len(namespace_list) > 0
73077444
73087445 list_namespace_subscription_level_cmd = (
73097446 "aks namespace list -o json"
73107447 )
7311-
7448+
73127449 namespace_list = self.cmd(list_namespace_subscription_level_cmd).get_output_in_json()
73137450 assert len(namespace_list) > 0
73147451
@@ -12651,7 +12788,7 @@ def test_aks_azure_service_mesh_with_egress_gateway(
1265112788 ):
1265212789 """This test case exercises enabling and disabling an Istio egress gateway.
1265312790
12654- It creates a cluster with azure service mesh profile and Static Egress Gateway enabled.
12791+ It creates a cluster with an Azure Service Mesh profile and Static Egress Gateway enabled.
1265512792 After that, we create a gateway nodepool and a staticgatewayconfiguration resource.
1265612793 Then, we create an Istio egress gateway, and then delete it.
1265712794 """
@@ -12742,7 +12879,7 @@ def test_aks_azure_service_mesh_with_egress_gateway(
1274212879 self.cmd(get_credential_cmd)
1274312880 finally:
1274412881 os.close(fd)
12745-
12882+
1274612883 sgcResource = f"""apiVersion: egressgateway.kubernetes.azure.com/v1alpha1
1274712884kind: StaticGatewayConfiguration
1274812885metadata:
@@ -12751,7 +12888,7 @@ def test_aks_azure_service_mesh_with_egress_gateway(
1275112888spec:
1275212889 gatewayNodepoolName: {gwNodepoolName}
1275312890"""
12754-
12891+
1275512892 sgc_fd, sgc_browse_path = tempfile.mkstemp()
1275612893
1275712894 try:
@@ -13655,7 +13792,7 @@ def test_aks_create_with_advanced_networkpolicies(
1365513792 "aks delete -g {resource_group} -n {name} --yes --no-wait",
1365613793 checks=[self.is_empty()],
1365713794 )
13658-
13795+
1365913796 @AllowLargeResponse()
1366013797 @AKSCustomResourceGroupPreparer(
1366113798 random_name_length=17,
@@ -13701,7 +13838,7 @@ def test_aks_create_with_transit_encryption_type(
1370113838 "aks delete -g {resource_group} -n {name} --yes --no-wait",
1370213839 checks=[self.is_empty()],
1370313840 )
13704-
13841+
1370513842 @AllowLargeResponse()
1370613843 @AKSCustomResourceGroupPreparer(
1370713844 random_name_length=17,
@@ -16204,11 +16341,11 @@ def test_aks_extension_backup(self, resource_group, resource_group_location):
1620416341 '--location {location} --sku Standard_LRS '
1620516342 '--allow-shared-key-access false')
1620616343
16207- # create blob container in storage account
16208- self.cmd('storage container create --name {blob} --account-name {storageAccount} '
16344+ # create blob container in storage account
16345+ self.cmd('storage container create --name {blob} --account-name {storageAccount} '
1620916346 '--auth-mode login')
1621016347
16211- # create the cluster
16348+ # create the cluster
1621216349 response = self.cmd('aks create -g {rg} -n {cluster_name} '
1621316350 '--node-count 3 --ssh-key-value={ssh_key_value}').get_output_in_json()
1621416351 cluster_resource_id = response["id"]
@@ -16220,13 +16357,13 @@ def test_aks_extension_backup(self, resource_group, resource_group_location):
1622016357 # create the K8s extension
1622116358 self.cmd('aks extension create -g {rg} -n {name} -c {cluster_name} '
1622216359 '--extension-type {extension_type} --scope cluster '
16223- '--config useKubeletIdentity=true --no-wait '
16360+ '--config useKubeletIdentity=true --no-wait '
1622416361 '--configuration-settings blobContainer={blob} '
1622516362 'storageAccount={storageAccount} '
1622616363 'storageAccountResourceGroup={rg} '
1622716364 'storageAccountSubscriptionId={subscription}')
1622816365
16229- # Update the K8s extension
16366+ # Update the K8s extension
1623016367 self.cmd('aks extension update -g {rg} -n {name} -c {cluster_name} --yes '
1623116368 '--no-wait --configuration-settings testKey=testValue')
1623216369
@@ -16305,11 +16442,11 @@ def test_aks_extension_type_backup(self, resource_group, resource_group_location
1630516442 '--location {location} --sku Standard_LRS '
1630616443 '--allow-shared-key-access false')
1630716444
16308- # create blob container in storage account
16309- self.cmd('storage container create --name {blob} --account-name {storageAccount} '
16445+ # create blob container in storage account
16446+ self.cmd('storage container create --name {blob} --account-name {storageAccount} '
1631016447 '--auth-mode login')
1631116448
16312- # create the cluster
16449+ # create the cluster
1631316450 response = self.cmd('aks create -g {rg} -n {cluster_name} '
1631416451 '--node-count 3 --ssh-key-value={ssh_key_value}').get_output_in_json()
1631516452 cluster_resource_id = response["id"]
@@ -16321,7 +16458,7 @@ def test_aks_extension_type_backup(self, resource_group, resource_group_location
1632116458 # create the K8s extension
1632216459 self.cmd('aks extension create -g {rg} -n {name} -c {cluster_name} '
1632316460 '--extension-type {extension_type} --scope cluster '
16324- '--config useKubeletIdentity=true --no-wait '
16461+ '--config useKubeletIdentity=true --no-wait '
1632516462 '--configuration-settings blobContainer={blob} '
1632616463 'storageAccount={storageAccount} '
1632716464 'storageAccountResourceGroup={rg} '
@@ -16554,7 +16691,7 @@ def test_aks_loadbalancer_commands(
1655416691 # create_cmd = (
1655516692 # "aks create --resource-group={resource_group} --name={name} --location={location} "
1655616693 # "--ssh-key-value={ssh_key_value} "
16557- # "--vm-set-type AvailabilitySet "
16694+ # "--vm-set-type AvailabilitySet "
1655816695 # "--load-balancer-sku Basic "
1655916696 # )
1656016697 # self.cmd(