Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions src/aks-preview/HISTORY.rst
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,11 @@ To release a new version, please select a new version number (usually plus 1 to
Pending
+++++++

19.0.0b12
+++++++

* `az aks create --workload-runtime KataVmIsolation`: Added the KataVmIsolation workload runtime value.

19.0.0b11
+++++++
* Remove PMK validation for `--azure-keyvault-kms-key-id` parameter.
Expand Down
3 changes: 2 additions & 1 deletion src/aks-preview/azext_aks_preview/_consts.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,8 @@
# workload runtime
CONST_WORKLOAD_RUNTIME_OCI_CONTAINER = "OCIContainer"
CONST_WORKLOAD_RUNTIME_WASM_WASI = "WasmWasi"
CONST_WORKLOAD_RUNTIME_KATA_MSHV_VM_ISOLATION = "KataMshvVmIsolation"
CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION = "KataVmIsolation"
CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION = "KataMshvVmIsolation"
CONST_WORKLOAD_RUNTIME_KATA_CC_ISOLATION = "KataCcIsolation"

# gpu instance
Expand Down
6 changes: 4 additions & 2 deletions src/aks-preview/azext_aks_preview/_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,8 @@
CONST_WEEKINDEX_SECOND,
CONST_WEEKINDEX_THIRD,
CONST_WEEKLY_MAINTENANCE_SCHEDULE,
CONST_WORKLOAD_RUNTIME_KATA_MSHV_VM_ISOLATION,
CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION,
CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION,
CONST_WORKLOAD_RUNTIME_KATA_CC_ISOLATION,
CONST_WORKLOAD_RUNTIME_OCI_CONTAINER,
CONST_WORKLOAD_RUNTIME_WASM_WASI,
Expand Down Expand Up @@ -314,7 +315,8 @@
workload_runtimes = [
CONST_WORKLOAD_RUNTIME_OCI_CONTAINER,
CONST_WORKLOAD_RUNTIME_WASM_WASI,
CONST_WORKLOAD_RUNTIME_KATA_MSHV_VM_ISOLATION,
CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION,
CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION,
CONST_WORKLOAD_RUNTIME_KATA_CC_ISOLATION,
]
gpu_instance_profiles = [
Expand Down

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

from azext_aks_preview.__init__ import register_aks_preview_resource_type
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from azext_aks_preview._consts import CONST_WORKLOAD_RUNTIME_OCI_CONTAINER, CONST_SSH_ACCESS_LOCALUSER, CONST_VIRTUAL_MACHINES
from azext_aks_preview._consts import CONST_WORKLOAD_RUNTIME_OCI_CONTAINER, CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION, CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION, CONST_SSH_ACCESS_LOCALUSER, CONST_VIRTUAL_MACHINES
from azext_aks_preview.agentpool_decorator import (
AKSPreviewAgentPoolAddDecorator,
AKSPreviewAgentPoolContext,
Expand Down Expand Up @@ -618,6 +618,35 @@ def common_get_disable_fips_image(self):
ctx_1.attach_agentpool(agentpool_1)
self.assertEqual(ctx_1.get_disable_fips_image(), True)

def common_get_enable_kata_image(self):
    """Verify get_workload_runtime() round-trips both Kata workload-runtime
    values: the new "KataVmIsolation" name and the legacy
    "KataMshvVmIsolation" name that aks-preview still accepts.
    """
    # Case 1: new Kata naming convention ("KataVmIsolation").
    ctx_1 = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({
            "workload_runtime": CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION,
        }),
        self.models,
        DecoratorMode.CREATE,
        self.agentpool_decorator_mode,
    )
    agentpool_1 = self.create_initialized_agentpool_instance(workload_runtime=CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION)
    ctx_1.attach_agentpool(agentpool_1)
    self.assertEqual(ctx_1.get_workload_runtime(), CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION)

    # Case 2: legacy Kata naming convention ("KataMshvVmIsolation").
    ctx_2 = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({
            "workload_runtime": CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION,
        }),
        self.models,
        DecoratorMode.CREATE,
        self.agentpool_decorator_mode,
    )
    agentpool_2 = self.create_initialized_agentpool_instance(workload_runtime=CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION)
    ctx_2.attach_agentpool(agentpool_2)
    self.assertEqual(ctx_2.get_workload_runtime(), CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION)

def common_get_agentpool_windows_profile(self):
ctx_1 = AKSPreviewAgentPoolContext(
self.cmd,
Expand Down Expand Up @@ -1034,12 +1063,15 @@ def test_get_enable_vtpm(self):
def test_get_disable_vtpm(self):
self.common_get_disable_vtpm()

def common_get_enable_fips_image(self):
def test_common_get_enable_fips_image(self):
self.common_get_enable_fips_image()

def common_get_disable_fips_image(self):
def test_common_get_disable_fips_image(self):
self.common_get_disable_fips_image()

def test_common_get_enable_kata_image(self):
self.common_get_enable_kata_image()

def test_get_agentpool_windows_profile(self):
self.common_get_agentpool_windows_profile()

Expand Down Expand Up @@ -1116,9 +1148,12 @@ def test_get_disable_secure_boot(self):
def test_get_enable_vtpm(self):
self.common_get_enable_vtpm()

def common_get_enable_fips_image(self):
def test_common_get_enable_fips_image(self):
self.common_get_enable_fips_image()

def test_common_get_enable_kata_image(self):
self.common_get_enable_kata_image()

def test_get_agentpool_windows_profile(self):
self.common_get_agentpool_windows_profile()

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import tempfile
import time

from azext_aks_preview._consts import CONST_CUSTOM_CA_TEST_CERT
from azext_aks_preview._consts import CONST_CUSTOM_CA_TEST_CERT, CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION, CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION
from azext_aks_preview._format import aks_machine_list_table_format
from azext_aks_preview.tests.latest.custom_preparers import (
AKSCustomResourceGroupPreparer,
Expand Down Expand Up @@ -3062,6 +3062,209 @@ def test_aks_nodepool_add_with_ossku_windows2025(
checks=[self.is_empty()],
)

@AllowLargeResponse()
@AKSCustomResourceGroupPreparer(
random_name_length=17, name_prefix="clitest", location="westus2"
)
def test_aks_cluster_kata(
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Queued live test to validate the change.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Live test failed with following error

          raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. '
                           'You can use --generate-ssh-keys to let CLI generate one for you')

E knack.util.CLIError: An RSA key file or key value must be supplied to SSH Key Value. You can use --generate-ssh-keys to let CLI generate one for you

https://dev.azure.com/msazure/CloudNativeCompute/CloudNativeCompute%20Team/_build/results?buildId=142667237&view=logs&j=b162b355-d59d-5864-ce0f-0a70f12dd28b&t=a26fe913-e2be-5062-b08e-d15f1acc7ea2&l=3967

Please follow the other test cases as examples: add `"ssh_key_value": self.generate_ssh_keys(),` to the `self.kwargs.update(...)` dictionary, then create the cluster with `--ssh-key-value={ssh_key_value}`.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added!

Copy link
Member

@FumingZhang FumingZhang Nov 5, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Re-queued live test, test passed!

self, resource_group, resource_group_location
):
# reset the count so in replay mode the random names will start with 0
self.test_resources_count = 0
# kwargs for string formatting
aks_name = self.create_random_name("cliakstest", 16)
self.kwargs.update(
{
"resource_group": resource_group,
"name": aks_name,
"dns_name_prefix": self.create_random_name("cliaksdns", 16),
"location": resource_group_location,
"resource_type": "Microsoft.ContainerService/ManagedClusters",
"workload_runtime": CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION,
"ssh_key_value": self.generate_ssh_keys(),
}
)

# create
create_cmd = (
"aks create --resource-group={resource_group} --name={name} --location={location} "
"--os-sku AzureLinux --workload-runtime {workload_runtime} --node-count 1 "
"--ssh-key-value={ssh_key_value} --node-vm-size Standard_D4s_v3"
)
self.cmd(
create_cmd,
checks=[
self.exists("fqdn"),
self.exists("nodeResourceGroup"),
self.check("provisioningState", "Succeeded"),
self.check("agentPoolProfiles[0].workloadRuntime", CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION),
],
)

# delete
self.cmd(
"aks delete -g {resource_group} -n {name} --yes --no-wait",
checks=[self.is_empty()],
)

@AllowLargeResponse()
@AKSCustomResourceGroupPreparer(
    random_name_length=17, name_prefix="clitest", location="westus2"
)
def test_aks_nodepool_add_with_kata(
    self, resource_group, resource_group_location
):
    """Create a plain cluster, then add a second nodepool using the new
    "KataVmIsolation" workload runtime and verify the value is applied.
    """
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name("cliakstest", 16)
    node_pool_name = self.create_random_name('c', 6)
    node_pool_name_second = self.create_random_name('c', 6)
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "name": aks_name,
            "node_pool_name": node_pool_name,
            "node_pool_name_second": node_pool_name_second,
            "location": resource_group_location,
            "resource_type": "Microsoft.ContainerService/ManagedClusters",
            "workload_runtime": CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION,
            "ssh_key_value": self.generate_ssh_keys(),
        }
    )

    # create the base cluster (default runtime; the Kata runtime is applied
    # to the second nodepool added below)
    create_cmd = (
        "aks create --resource-group={resource_group} --name={name} "
        "--nodepool-name {node_pool_name} -c 1 --ssh-key-value={ssh_key_value}"
    )
    self.cmd(create_cmd, checks=[
        self.check('provisioningState', 'Succeeded'),
    ])

    # nodepool add with the Kata workload runtime; use the {workload_runtime}
    # kwarg instead of a hard-coded literal so the command and the assertion
    # below are driven by the same constant, consistent with
    # test_aks_nodepool_add_with_kata_mshv_vm_isolation
    add_cmd = (
        "aks nodepool add --cluster-name={name} --resource-group={resource_group} "
        "--name={node_pool_name_second} --os-sku AzureLinux "
        "--workload-runtime {workload_runtime} --node-vm-size Standard_D4s_v3"
    )

    self.cmd(
        add_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("workloadRuntime", CONST_WORKLOAD_RUNTIME_KATA_VM_ISOLATION),
        ],
    )

    # delete the cluster without waiting for completion
    self.cmd(
        "aks delete -g {resource_group} -n {name} --yes --no-wait",
        checks=[self.is_empty()],
    )

@AllowLargeResponse()
@AKSCustomResourceGroupPreparer(
    random_name_length=17, name_prefix="clitest", location="westus2"
)
def test_aks_cluster_kata_mshv_vm_isolation(
    self, resource_group, resource_group_location
):
    """Exercise `az aks create` with the legacy Kata workload-runtime name
    ("KataMshvVmIsolation"), which aks-preview still accepts alongside the
    newer "KataVmIsolation".
    """
    # Reset so replay-mode random names start from index 0.
    self.test_resources_count = 0

    # String-formatting kwargs for the commands below. The call order of
    # create_random_name is kept stable for recorded-test replay.
    cluster_name = self.create_random_name("cliakstest", 16)
    format_args = {
        "resource_group": resource_group,
        "name": cluster_name,
        "dns_name_prefix": self.create_random_name("cliaksdns", 16),
        "location": resource_group_location,
        "resource_type": "Microsoft.ContainerService/ManagedClusters",
        "workload_runtime": CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION,
        "ssh_key_value": self.generate_ssh_keys(),
    }
    self.kwargs.update(format_args)

    # Create the cluster and confirm the legacy runtime name was honored.
    self.cmd(
        "aks create --resource-group={resource_group} --name={name} --location={location} "
        "--os-sku AzureLinux --workload-runtime {workload_runtime} --node-count 1 "
        "--ssh-key-value={ssh_key_value} --node-vm-size Standard_D4s_v3",
        checks=[
            self.exists("fqdn"),
            self.exists("nodeResourceGroup"),
            self.check("provisioningState", "Succeeded"),
            self.check("agentPoolProfiles[0].workloadRuntime", CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION),
        ],
    )

    # Clean up; no need to wait for the delete to finish.
    self.cmd(
        "aks delete -g {resource_group} -n {name} --yes --no-wait",
        checks=[self.is_empty()],
    )

@AllowLargeResponse()
@AKSCustomResourceGroupPreparer(
    random_name_length=17, name_prefix="clitest", location="westus2"
)
def test_aks_nodepool_add_with_kata_mshv_vm_isolation(
    self, resource_group, resource_group_location
):
    """Create a plain cluster, then add a second nodepool using the legacy
    "KataMshvVmIsolation" workload-runtime name and verify it is applied.
    """
    # reset the count so in replay mode the random names will start with 0
    self.test_resources_count = 0
    # kwargs for string formatting
    aks_name = self.create_random_name("cliakstest", 16)
    node_pool_name = self.create_random_name('c', 6)
    node_pool_name_second = self.create_random_name('c', 6)
    self.kwargs.update(
        {
            "resource_group": resource_group,
            "name": aks_name,
            "node_pool_name": node_pool_name,
            "node_pool_name_second": node_pool_name_second,
            "location": resource_group_location,
            "resource_type": "Microsoft.ContainerService/ManagedClusters",
            "workload_runtime": CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION,
            "ssh_key_value": self.generate_ssh_keys(),
        }
    )

    # create the base cluster (default runtime; the Kata runtime is applied
    # to the second nodepool added below)
    create_cmd = (
        "aks create --resource-group={resource_group} --name={name} "
        "--nodepool-name {node_pool_name} -c 1 --ssh-key-value={ssh_key_value}"
    )
    self.cmd(create_cmd, checks=[
        self.check('provisioningState', 'Succeeded'),
    ])

    # nodepool add with the legacy Kata workload-runtime name
    update_cmd = (
        "aks nodepool add --cluster-name={name} --resource-group={resource_group} "
        "--name={node_pool_name_second} --os-sku AzureLinux "
        "--workload-runtime {workload_runtime} --node-vm-size Standard_D4s_v3"
    )

    self.cmd(
        update_cmd,
        checks=[
            self.check("provisioningState", "Succeeded"),
            self.check("workloadRuntime", CONST_WORKLOAD_RUNTIME_OLD_KATA_VM_ISOLATION),
        ],
    )

    # delete the cluster without waiting for completion
    self.cmd(
        "aks delete -g {resource_group} -n {name} --yes --no-wait",
        checks=[self.is_empty()],
    )

@AllowLargeResponse()
@AKSCustomResourceGroupPreparer(random_name_length=17, name_prefix='clitest', location='westus')
def test_aks_nodepool_add_with_ossku_ubuntu2204(self, resource_group, resource_group_location):
Expand Down
2 changes: 1 addition & 1 deletion src/aks-preview/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@

from setuptools import find_packages, setup

VERSION = "19.0.0b11"
VERSION = "19.0.0b12"

CLASSIFIERS = [
"Development Status :: 4 - Beta",
Expand Down
Loading