az command aks: Add KataVmIsolation workload runtime (#9104)
Changes from all commits: 5f8bb14, 4d8d385, e46bd17, ec0ccfa, d02fbdf, 988f5fd, 21b9cf0, 9a41aa1, 10ad9b1, da58497, 4a90d93
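The new tests import two workload-runtime constants from azext_aks_preview._consts (first hunk below). Their definitions are outside this diff; here is a minimal sketch of what the tests appear to assume, with the string values inferred from the test names rather than taken from the source (the actual definitions in _consts.py may differ):

```python
# Hypothetical reconstruction: the real definitions live in
# azext_aks_preview/_consts.py, which this diff does not show.
CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION = "KataVmIsolation"          # new name added by this PR
CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION = "KataMshvVmIsolation"  # older name still in use in aks-preview
```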
```diff
@@ -10,7 +10,7 @@
 import tempfile
 import time

-from azext_aks_preview._consts import CONST_CUSTOM_CA_TEST_CERT
+from azext_aks_preview._consts import CONST_CUSTOM_CA_TEST_CERT, CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION, CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION
 from azext_aks_preview._format import aks_machine_list_table_format
 from azext_aks_preview.tests.latest.custom_preparers import (
     AKSCustomResourceGroupPreparer,
```
```diff
@@ -3062,6 +3062,209 @@ def test_aks_nodepool_add_with_ossku_windows2025(
             checks=[self.is_empty()],
         )

+    @AllowLargeResponse()
+    @AKSCustomResourceGroupPreparer(
+        random_name_length=17, name_prefix="clitest", location="westus2"
+    )
+    def test_aks_cluster_kata(
```
Review thread on `def test_aks_cluster_kata`:

Member: Queued live test to validate the change.

Member: Live test failed with the following error. Please follow other cases as examples, and add

Contributor (Author): Added!

Member: Re-queued live test, test passed!
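(Context on the live-test workflow above: these scenario tests replay recorded HTTP sessions by default, which is why each test resets self.test_resources_count so random names are deterministic in replay mode; a live run re-executes the commands against real Azure resources and refreshes the recordings. Re-running a single test live is typically done with the azure-cli dev tooling, e.g. `azdev test test_aks_cluster_kata --live`; that command is an assumption about standard tooling, not something stated in this thread.)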
```diff
+        self, resource_group, resource_group_location
+    ):
+        # reset the count so in replay mode the random names will start with 0
+        self.test_resources_count = 0
+        # kwargs for string formatting
+        aks_name = self.create_random_name("cliakstest", 16)
+        self.kwargs.update(
+            {
+                "resource_group": resource_group,
+                "name": aks_name,
+                "dns_name_prefix": self.create_random_name("cliaksdns", 16),
+                "location": resource_group_location,
+                "resource_type": "Microsoft.ContainerService/ManagedClusters",
+                "workload_runtime": CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION,
+                "ssh_key_value": self.generate_ssh_keys(),
+            }
+        )
+
+        # create
+        create_cmd = (
+            "aks create --resource-group={resource_group} --name={name} --location={location} "
+            "--os-sku AzureLinux --workload-runtime {workload_runtime} --node-count 1 "
+            "--ssh-key-value={ssh_key_value} --node-vm-size Standard_D4s_v3"
+        )
+        self.cmd(
+            create_cmd,
+            checks=[
+                self.exists("fqdn"),
+                self.exists("nodeResourceGroup"),
+                self.check("provisioningState", "Succeeded"),
+                self.check("agentPoolProfiles[0].workloadRuntime", CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION),
+            ],
+        )
+
+        # delete
+        self.cmd(
+            "aks delete -g {resource_group} -n {name} --yes --no-wait",
+            checks=[self.is_empty()],
+        )
```
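A note on the checks= assertions used throughout these tests: self.check(path, value) is a JMESPath check in azure.cli.testsdk, i.e. it runs a JMESPath query against the JSON the command prints and compares the result to the expected value, while self.exists(path) only requires the path to resolve. A minimal self-contained illustration using the jmespath package directly (the response dict is a made-up stand-in for real `aks create` output):

```python
import jmespath

# Made-up stand-in for the JSON document `az aks create` returns.
response = {
    "fqdn": "cliakstest000001-dns.hcp.westus2.azmk8s.io",
    "provisioningState": "Succeeded",
    "agentPoolProfiles": [{"workloadRuntime": "KataVmIsolation"}],
}

# self.exists("fqdn"): the queried path must resolve to a non-null value.
assert jmespath.search("fqdn", response) is not None

# self.check("agentPoolProfiles[0].workloadRuntime", <expected>): exact match.
assert jmespath.search("agentPoolProfiles[0].workloadRuntime", response) == "KataVmIsolation"
```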
```diff
+    @AllowLargeResponse()
+    @AKSCustomResourceGroupPreparer(
+        random_name_length=17, name_prefix="clitest", location="westus2"
+    )
+    def test_aks_nodepool_add_with_kata(
+        self, resource_group, resource_group_location
+    ):
+        # reset the count so in replay mode the random names will start with 0
+        self.test_resources_count = 0
+        # kwargs for string formatting
+        aks_name = self.create_random_name("cliakstest", 16)
+        node_pool_name = self.create_random_name('c', 6)
+        node_pool_name_second = self.create_random_name('c', 6)
+        self.kwargs.update(
+            {
+                "resource_group": resource_group,
+                "name": aks_name,
+                "node_pool_name": node_pool_name,
+                "node_pool_name_second": node_pool_name_second,
+                "location": resource_group_location,
+                "resource_type": "Microsoft.ContainerService/ManagedClusters",
+                "workload_runtime": CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION,
+                "ssh_key_value": self.generate_ssh_keys(),
+            }
+        )
+
+        # create
+        create_cmd = (
+            "aks create --resource-group={resource_group} --name={name} "
+            "--nodepool-name {node_pool_name} -c 1 --ssh-key-value={ssh_key_value}"
+        )
+        self.cmd(create_cmd, checks=[
+            self.check('provisioningState', 'Succeeded'),
+        ])
+
+        # nodepool add with kata
+        update_cmd = (
+            "aks nodepool add --cluster-name={name} --resource-group={resource_group} "
+            "--name={node_pool_name_second} --os-sku AzureLinux "
+            "--workload-runtime KataVmIsolation --node-vm-size Standard_D4s_v3"
+        )
+
+        self.cmd(
+            update_cmd,
+            checks=[
+                self.check("provisioningState", "Succeeded"),
+                self.check("workloadRuntime", CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION),
+            ],
+        )
+
+        # delete
+        self.cmd(
+            "aks delete -g {resource_group} -n {name} --yes --no-wait",
+            checks=[self.is_empty()],
+        )
+
+    @AllowLargeResponse()
+    @AKSCustomResourceGroupPreparer(
+        random_name_length=17, name_prefix="clitest", location="westus2"
+    )
+    def test_aks_cluster_kata_mshv_vm_isolation(
+        self, resource_group, resource_group_location
+    ):
+        # Testing the old kata name that is still in use in aks-preview
+        # reset the count so in replay mode the random names will start with 0
+        self.test_resources_count = 0
+        # kwargs for string formatting
+        aks_name = self.create_random_name("cliakstest", 16)
+        self.kwargs.update(
+            {
+                "resource_group": resource_group,
+                "name": aks_name,
+                "dns_name_prefix": self.create_random_name("cliaksdns", 16),
+                "location": resource_group_location,
+                "resource_type": "Microsoft.ContainerService/ManagedClusters",
+                "workload_runtime": CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION,
+                "ssh_key_value": self.generate_ssh_keys(),
+            }
+        )
+
+        # create
+        create_cmd = (
+            "aks create --resource-group={resource_group} --name={name} --location={location} "
+            "--os-sku AzureLinux --workload-runtime {workload_runtime} --node-count 1 "
+            "--ssh-key-value={ssh_key_value} --node-vm-size Standard_D4s_v3"
+        )
+        self.cmd(
+            create_cmd,
+            checks=[
+                self.exists("fqdn"),
+                self.exists("nodeResourceGroup"),
+                self.check("provisioningState", "Succeeded"),
+                self.check("agentPoolProfiles[0].workloadRuntime", CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION),
+            ],
+        )
+
+        # delete
+        self.cmd(
+            "aks delete -g {resource_group} -n {name} --yes --no-wait",
+            checks=[self.is_empty()],
+        )
+
+    @AllowLargeResponse()
+    @AKSCustomResourceGroupPreparer(
+        random_name_length=17, name_prefix="clitest", location="westus2"
+    )
+    def test_aks_nodepool_add_with_kata_mshv_vm_isolation(
+        self, resource_group, resource_group_location
+    ):
+        # reset the count so in replay mode the random names will start with 0
+        self.test_resources_count = 0
+        # kwargs for string formatting
+        aks_name = self.create_random_name("cliakstest", 16)
+        node_pool_name = self.create_random_name('c', 6)
+        node_pool_name_second = self.create_random_name('c', 6)
+        self.kwargs.update(
+            {
+                "resource_group": resource_group,
+                "name": aks_name,
+                "node_pool_name": node_pool_name,
+                "node_pool_name_second": node_pool_name_second,
+                "location": resource_group_location,
+                "resource_type": "Microsoft.ContainerService/ManagedClusters",
+                "workload_runtime": CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION,
+                "ssh_key_value": self.generate_ssh_keys(),
+            }
+        )
+
+        # create
+        create_cmd = (
+            "aks create --resource-group={resource_group} --name={name} "
+            "--nodepool-name {node_pool_name} -c 1 --ssh-key-value={ssh_key_value}"
+        )
+        self.cmd(create_cmd, checks=[
+            self.check('provisioningState', 'Succeeded'),
+        ])
+
+        # nodepool add with kata
+        update_cmd = (
+            "aks nodepool add --cluster-name={name} --resource-group={resource_group} "
+            "--name={node_pool_name_second} --os-sku AzureLinux "
+            "--workload-runtime {workload_runtime} --node-vm-size Standard_D4s_v3"
+        )
+
+        self.cmd(
+            update_cmd,
+            checks=[
+                self.check("provisioningState", "Succeeded"),
+                self.check("workloadRuntime", CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION),
+            ],
+        )
+
+        # delete
+        self.cmd(
+            "aks delete -g {resource_group} -n {name} --yes --no-wait",
+            checks=[self.is_empty()],
+        )
+
     @AllowLargeResponse()
     @AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus')
     def test_aks_nodepool_add_with_ossku_ubuntu2204(self, resource_group, resource_group_location):
```