|
| 1 | +# -------------------------------------------------------------------------------------------- |
| 2 | +# Copyright (c) Microsoft Corporation. All rights reserved. |
| 3 | +# Licensed under the MIT License. See License.txt in the project root for license information. |
| 4 | +# -------------------------------------------------------------------------------------------- |
| 5 | + |
| 6 | +import os |
| 7 | +import time |
| 8 | + |
| 9 | +from azure.cli.testsdk.scenario_tests import AllowLargeResponse |
| 10 | +from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, JMESPathCheck, live_only) |
| 11 | + |
| 12 | +from azext_containerapp.tests.latest.common import (write_test_file, clean_up_test_file) |
| 13 | +from .common import TEST_LOCATION |
| 14 | + |
# Absolute path of the directory containing this test module (used to locate test assets).
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
| 16 | + |
class ContainerAppWorkloadProfilesGPUTest(ScenarioTest):
    """Live scenario tests for Container Apps environments with GPU workload profiles.

    Covers two provisioning paths:
    - a dedicated GPU profile created via ``--enable-dedicated-gpu``;
    - a consumption GPU profile added with ``containerapp env workload-profile set``.
    """

    def __init__(self, *arg, **kwargs):
        # random_config_dir=True gives each run an isolated CLI config directory,
        # so `configure --defaults` in one test cannot leak into another.
        super().__init__(*arg, random_config_dir=True, **kwargs)

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="northeurope")
    def test_containerapp_create_enable_dedicated_gpu(self, resource_group):
        """Env created with --enable-dedicated-gpu gets a default 'gpu'/NC24-A100 profile,
        and an app placed on it provisions successfully."""
        self.cmd('configure --defaults location={}'.format("northeurope"))
        env = self.create_random_name(prefix='gpu-env', length=24)
        # Name/type the service assigns to the auto-created dedicated GPU profile.
        gpu_default_name = "gpu"
        gpu_default_type = "NC24-A100"
        self.cmd('containerapp env create -g {} -n {} --logs-destination none --enable-dedicated-gpu'.format(
            resource_group, env), expect_failure=False, checks=[
            JMESPathCheck("name", env),
            JMESPathCheck("properties.provisioningState", "Succeeded"),
            # Consumption profile plus the dedicated GPU profile.
            JMESPathCheck("length(properties.workloadProfiles)", 2),
            JMESPathCheck('properties.workloadProfiles[0].name', "Consumption", case_sensitive=False),
            JMESPathCheck('properties.workloadProfiles[0].workloadProfileType', "Consumption", case_sensitive=False),
            JMESPathCheck('properties.workloadProfiles[1].name', gpu_default_name, case_sensitive=False),
            JMESPathCheck('properties.workloadProfiles[1].workloadProfileType', gpu_default_type, case_sensitive=False),
            JMESPathCheck('properties.workloadProfiles[1].maximumCount', 1),
            JMESPathCheck('properties.workloadProfiles[1].minimumCount', 0),
        ])
        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env)).get_output_in_json()

        # Poll until the environment leaves the transient "Waiting" state.
        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)
            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env)).get_output_in_json()
        app1 = self.create_random_name(prefix='app1', length=24)
        # NOTE(review): no --gpu flag is passed here, yet resources.gpu is expected to be '1' —
        # presumably the dedicated profile implies a default GPU allocation; confirm against service behavior.
        self.cmd(f'containerapp create -n {app1} -g {resource_group} --image mcr.microsoft.com/azuredocs/samples-tf-mnist-demo:gpu --environment {env} -w {gpu_default_name} --min-replicas 1 --cpu 0.1 --memory 0.1', checks=[
            JMESPathCheck("properties.provisioningState", "Succeeded"),
            JMESPathCheck("properties.workloadProfileName", gpu_default_name),
            JMESPathCheck('properties.template.containers[0].resources.cpu', '0.1'),
            JMESPathCheck('properties.template.containers[0].resources.memory', '0.1Gi'),
            JMESPathCheck('properties.template.containers[0].resources.gpu', '1'),
            JMESPathCheck('properties.template.scale.minReplicas', '1'),
            JMESPathCheck('properties.template.scale.maxReplicas', '10')
        ])

    @AllowLargeResponse(8192)
    @ResourceGroupPreparer(location="eastus2")
    def test_containerapp_create_enable_consumption_gpu(self, resource_group):
        """A Consumption_GPU_NC8as_T4 profile added via workload-profile set
        can host an app created with an explicit --gpu request."""
        # NOTE(review): the resource group is prepared in eastus2 but the default
        # location is set to northeurope — confirm the mismatch is intentional.
        self.cmd('configure --defaults location={}'.format("northeurope"))
        env = self.create_random_name(prefix='consumption-gpu-env', length=24)
        self.cmd('containerapp env create -g {} -n {} --logs-destination none --enable-workload-profiles'.format(
            resource_group, env), expect_failure=False, checks=[
            JMESPathCheck("name", env),
            JMESPathCheck("properties.provisioningState", "Succeeded"),
            JMESPathCheck("length(properties.workloadProfiles)", 1),
            JMESPathCheck('properties.workloadProfiles[0].name', "Consumption", case_sensitive=False),
            JMESPathCheck('properties.workloadProfiles[0].workloadProfileType', "Consumption", case_sensitive=False),
        ])
        consumption_gpu_wp_name = "Consumption-T4"

        # BUGFIX: the original used a named field {consumption_gpu_wp_name} inside a
        # .format() call that only supplied positional args, which raises
        # KeyError: 'consumption_gpu_wp_name' before the command runs. Pass the
        # profile name positionally instead (and drop the stray 'az ' prefix so the
        # command matches every other self.cmd call in this class).
        self.cmd('containerapp env workload-profile set -g {} -n {} --workload-profile-name {} --workload-profile-type Consumption_GPU_NC8as_T4'.format(
            resource_group, env, consumption_gpu_wp_name), expect_failure=False)

        containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env)).get_output_in_json()

        # Poll until the environment leaves the transient "Waiting" state.
        while containerapp_env["properties"]["provisioningState"].lower() == "waiting":
            time.sleep(5)
            containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env)).get_output_in_json()
        app1 = self.create_random_name(prefix='app1', length=24)
        self.cmd(f'containerapp create -n {app1} -g {resource_group} --image mcr.microsoft.com/azuredocs/samples-tf-mnist-demo:gpu --environment {env} -w {consumption_gpu_wp_name} --cpu 0.1 --memory 0.1 --gpu 1', checks=[
            JMESPathCheck("properties.provisioningState", "Succeeded"),
            JMESPathCheck("properties.workloadProfileName", consumption_gpu_wp_name),
            JMESPathCheck('properties.template.containers[0].resources.cpu', '0.1'),
            JMESPathCheck('properties.template.containers[0].resources.memory', '0.1Gi'),
            JMESPathCheck('properties.template.containers[0].resources.gpu', '1'),
        ])
0 commit comments