diff --git a/src/connectedk8s/HISTORY.rst b/src/connectedk8s/HISTORY.rst index 7401698f1dc..42a48c4dc42 100644 --- a/src/connectedk8s/HISTORY.rst +++ b/src/connectedk8s/HISTORY.rst @@ -3,6 +3,10 @@ Release History =============== +1.10.4 +++++++ +* Fixed the issue where the 'connectedk8s proxy' command would fail if the kubeconfig file was empty. + 1.10.3 ++++++ * Fixed linting and styling issues, and added type annotations. diff --git a/src/connectedk8s/azext_connectedk8s/__init__.py b/src/connectedk8s/azext_connectedk8s/__init__.py index d979443c192..4ea68896e31 100644 --- a/src/connectedk8s/azext_connectedk8s/__init__.py +++ b/src/connectedk8s/azext_connectedk8s/__init__.py @@ -43,7 +43,7 @@ def load_arguments(self, command: CLICommand) -> None: COMMAND_LOADER_CLS = Connectedk8sCommandsLoader __all__ = [ - "helps", - "Connectedk8sCommandsLoader", "COMMAND_LOADER_CLS", + "Connectedk8sCommandsLoader", + "helps", ] diff --git a/src/connectedk8s/azext_connectedk8s/_constants.py b/src/connectedk8s/azext_connectedk8s/_constants.py index 15eb12ba05f..ec62d057867 100644 --- a/src/connectedk8s/azext_connectedk8s/_constants.py +++ b/src/connectedk8s/azext_connectedk8s/_constants.py @@ -163,6 +163,7 @@ Create_Directory_Fault_Type = ( "Error while creating directory for placing the executable" ) +Remove_File_Fault_Type = "Error while deleting the specified file" Run_Clientproxy_Fault_Type = "Error while starting client proxy process." Post_Hybridconn_Fault_Type = ( "Error while posting hybrid connection details to proxy process" @@ -460,23 +461,20 @@ ) DNS_Check_Result_String = "DNS Result:" AZ_CLI_ADAL_TO_MSAL_MIGRATE_VERSION = "2.30.0" -CLIENT_PROXY_VERSION = "1.3.022011" +CLIENT_PROXY_VERSION = "1.3.028501" +CLIENT_PROXY_FOLDER = ".clientproxy" API_SERVER_PORT = 47011 CLIENT_PROXY_PORT = 47010 CLIENTPROXY_CLIENT_ID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" API_CALL_RETRIES = 12 DEFAULT_REQUEST_TIMEOUT = 10 # seconds -RELEASE_DATE_WINDOWS = "release12-01-23" -RELEASE_DATE_LINUX = "release12-01-23" CSP_REFRESH_TIME = 300 # Default timeout in seconds for Onboarding Helm Install DEFAULT_MAX_ONBOARDING_TIMEOUT_HELMVALUE_SECONDS = "1200" # URL constants -CSP_Storage_Url = "https://k8sconnectcsp.azureedge.net" -CSP_Storage_Url_Mooncake = "https://k8sconnectcsp.blob.core.chinacloudapi.cn" -CSP_Storage_Url_Fairfax = "https://k8sconnectcsp.azureedge.us" +CLIENT_PROXY_MCR_TARGET = "mcr.microsoft.com/azureconnectivity/proxy" HELM_STORAGE_URL = "https://k8connecthelm.azureedge.net" HELM_VERSION = "v3.12.2" Download_And_Install_Kubectl_Fault_Type = "Failed to download and install kubectl" diff --git a/src/connectedk8s/azext_connectedk8s/_fileutils.py b/src/connectedk8s/azext_connectedk8s/_fileutils.py new file mode 100644 index 00000000000..a667354a4a6 --- /dev/null +++ b/src/connectedk8s/azext_connectedk8s/_fileutils.py @@ -0,0 +1,42 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import os + +from azure.cli.core import azclierror, telemetry +from knack import log + +import azext_connectedk8s._constants as consts + +logger = log.get_logger(__name__) + + +def delete_file(file_path: str, message: str, warning: bool = False) -> None: + # pylint: disable=broad-except + if os.path.isfile(file_path): + try: + os.remove(file_path) + except Exception as e: + telemetry.set_exception( + exception=e, + fault_type=consts.Remove_File_Fault_Type, + summary=f"Unable to delete file at {file_path}", + ) + if warning: + logger.warning(message) + else: + raise azclierror.FileOperationError(message + "Error: " + str(e)) from e + + +def create_directory(file_path: str, error_message: str) -> None: + try: + os.makedirs(file_path) + except Exception as e: + telemetry.set_exception( + exception=e, + fault_type=consts.Create_Directory_Fault_Type, + summary="Unable to create installation directory", + ) + raise azclierror.FileOperationError(error_message + "Error: " + str(e)) from e diff --git a/src/connectedk8s/azext_connectedk8s/_utils.py b/src/connectedk8s/azext_connectedk8s/_utils.py index d55fa4e2023..44916c79ac1 100644 --- a/src/connectedk8s/azext_connectedk8s/_utils.py +++ b/src/connectedk8s/azext_connectedk8s/_utils.py @@ -1542,18 +1542,6 @@ def az_cli(args_str: str) -> Any: return True -# def is_cli_using_msal_auth(): -# response_cli_version = az_cli("version --output json") -# try: -# cli_version = response_cli_version['azure-cli'] -# except Exception as ex: -# raise CLIInternalError(f"Unable to decode the az cli version installed: {ex}") -# if version.parse(cli_version) >= version.parse(consts.AZ_CLI_ADAL_TO_MSAL_MIGRATE_VERSION): -# return True -# else: -# return False - - def is_cli_using_msal_auth() -> bool: response_cli_version = az_cli("version --output json") try: diff --git a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/__init__.py b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/__init__.py new file mode 100644 index 00000000000..34913fb394d --- /dev/null +++ b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/__init__.py @@ -0,0 +1,4 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- diff --git a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py new file mode 100644 index 00000000000..48a4896aaa8 --- /dev/null +++ b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py @@ -0,0 +1,277 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# --------------------------------------------------------------------------------------------
+import os
+import stat
+import tarfile
+import time
+from glob import glob
+from typing import List
+
+import oras.client  # type: ignore[import-untyped]
+from azure.cli.core import azclierror, telemetry
+from azure.cli.core.style import Style, print_styled_text
+from knack import log
+
+import azext_connectedk8s._constants as consts
+import azext_connectedk8s._fileutils as file_utils
+
+logger = log.get_logger(__name__)
+
+
+# Downloads client side proxy to connect to Arc Connectivity Platform
+def install_client_side_proxy(arc_proxy_folder: str | None) -> str:
+    client_operating_system = _get_client_operating_system()
+    client_architecture = _get_client_architecture()
+    install_dir = _get_proxy_install_dir(arc_proxy_folder)
+    proxy_name = _get_proxy_filename(client_operating_system, client_architecture)
+    install_location = os.path.join(install_dir, proxy_name)
+
+    # Only download new proxy if it doesn't exist already
+    try:
+        if not os.path.isfile(install_location):
+            if not os.path.isdir(install_dir):
+                file_utils.create_directory(
+                    install_dir,
+                    f"Failed to create client proxy directory '{install_dir}'.",
+                )
+            # if directory exists, delete any older versions of the proxy
+            else:
+                older_version_location = _get_older_version_proxy_path(
+                    install_dir, client_operating_system, client_architecture
+                )
+                older_version_files = glob(older_version_location)
+                for f in older_version_files:
+                    file_utils.delete_file(
+                        f, f"failed to delete older version file {f}", warning=True
+                    )
+
+            _download_proxy_from_MCR(
+                install_dir, proxy_name, client_operating_system, client_architecture
+            )
+            _check_proxy_installation(install_dir, proxy_name)
+
+    except Exception as e:
+        telemetry.set_exception(
+            exception=e,
+            fault_type=consts.Create_CSPExe_Fault_Type,
+            summary="Unable to create proxy executable",
+        )
+        raise e
+
+    return install_location
+
+
+def _download_proxy_from_MCR(
+    dest_dir: str, proxy_name: str, operating_system: str, architecture: str
+) -> None:
+    mar_target = (
+        f"{consts.CLIENT_PROXY_MCR_TARGET}/{operating_system.lower()}/amd64/arc-proxy"
+    )
+    logger.debug(
+        "Downloading Arc Connectivity Proxy from %s in Microsoft Artifact Registry.",
+        mar_target,
+    )
+
+    client = oras.client.OrasClient()
+    t0 = time.time()
+
+    try:
+        response = client.pull(
+            target=f"{mar_target}:{consts.CLIENT_PROXY_VERSION}", outdir=dest_dir
+        )
+    except Exception as e:
+        telemetry.set_exception(
+            exception=e,
+            fault_type=consts.Download_Exe_Fault_Type,
+            summary="Unable to download clientproxy executable.",
+        )
+        raise azclierror.CLIInternalError(
+            f"Failed to download Arc Connectivity proxy with error {e!s}. Please try again."
+        )
+
+    time_elapsed = time.time() - t0
+
+    proxy_data = {
+        "Context.Default.AzureCLI.ArcProxyDownloadTime": time_elapsed,
+        "Context.Default.AzureCLI.ArcProxyVersion": consts.CLIENT_PROXY_VERSION,
+    }
+    telemetry.add_extension_event("connectedk8s", proxy_data)
+
+    proxy_package_path = _get_proxy_package_path_from_oras_response(response)
+    _extract_proxy_tar_files(proxy_package_path, dest_dir, proxy_name)
+    file_utils.delete_file(
+        proxy_package_path,
+        f"Failed to delete {proxy_package_path}. Please delete manually.",
+        True,
+    )
+
+
+def _get_proxy_package_path_from_oras_response(pull_response: List[str]) -> str:
+    if not isinstance(pull_response, list):
+        raise azclierror.CLIInternalError(
+            "Attempt to download Arc Connectivity Proxy returned unexpected result. Please try again."
+        )
+
+    if len(pull_response) != 1:
+        for r in pull_response:
+            file_utils.delete_file(
+                r, f"Failed to delete {r}. Please delete it manually.", True
+            )
+        raise azclierror.CLIInternalError(
+            "Attempt to download Arc Connectivity Proxy returned unexpected result. Please try again."
+        )
+
+    proxy_package_path = pull_response[0]
+
+    if not os.path.isfile(proxy_package_path):
+        raise azclierror.CLIInternalError(
+            "Unable to download Arc Connectivity Proxy. Please try again."
+        )
+
+    logger.debug("Proxy package downloaded to %s", proxy_package_path)
+
+    return proxy_package_path
+
+
+def _extract_proxy_tar_files(
+    proxy_package_path: str, install_dir: str, proxy_name: str
+) -> None:
+    with tarfile.open(proxy_package_path, "r:gz") as tar:
+        members = []
+        for member in tar.getmembers():
+            if member.isfile():
+                filenames = member.name.split("/")
+
+                if len(filenames) != 2:
+                    tar.close()
+                    file_utils.delete_file(
+                        proxy_package_path,
+                        f"Failed to delete {proxy_package_path}. Please delete it manually.",
+                        True,
+                    )
+                    raise azclierror.CLIInternalError(
+                        "Attempt to download Arc Connectivity Proxy returned unexpected result. Please try again."
+                    )
+
+                member.name = filenames[1]
+
+                if member.name.startswith("arcproxy"):
+                    member.name = proxy_name
+                elif member.name.lower() not in ["license.txt", "thirdpartynotice.txt"]:
+                    tar.close()
+                    file_utils.delete_file(
+                        proxy_package_path,
+                        f"Failed to delete {proxy_package_path}. Please delete it manually.",
+                        True,
+                    )
+                    raise azclierror.CLIInternalError(
+                        "Attempt to download Arc Connectivity Proxy returned unexpected result. Please try again."
+                    )
+
+                members.append(member)
+
+        tar.extractall(members=members, path=install_dir)
+
+
+def _check_proxy_installation(install_dir: str, proxy_name: str) -> None:
+    proxy_filepath = os.path.join(install_dir, proxy_name)
+    os.chmod(proxy_filepath, os.stat(proxy_filepath).st_mode | stat.S_IXUSR)
+    if os.path.isfile(proxy_filepath):
+        print_styled_text(
+            (
+                Style.SUCCESS,
+                f"Successfully installed Arc Connectivity Proxy file {proxy_filepath}",
+            )
+        )
+    else:
+        raise azclierror.CLIInternalError(
+            "Failed to install required Arc Connectivity Proxy. "
+            f"Couldn't find expected file {proxy_filepath}. Please try again."
+        )
+
+    license_files = ["LICENSE.txt", "ThirdPartyNotice.txt"]
+    for file in license_files:
+        file_location = os.path.join(install_dir, file)
+        if os.path.isfile(file_location):
+            print_styled_text(
+                (
+                    Style.SUCCESS,
+                    f"Successfully installed Arc Connectivity Proxy License file {file_location}",
+                )
+            )
+        else:
+            logger.warning(
+                "Failed to download Arc Connectivity Proxy license file %s. Couldn't find expected file %s. "
+                "This won't affect your connection.",
+                file,
+                file_location,
+            )
+
+
+def _get_proxy_filename(operating_system: str, architecture: str) -> str:
+    if operating_system.lower() == "darwin" and architecture == "386":
+        raise azclierror.BadRequestError("Unsupported Darwin OS with 386 architecture.")
+    proxy_filename = (
+        f"arcProxy{operating_system.capitalize()}{consts.CLIENT_PROXY_VERSION}"
+    )
+    if operating_system.lower() == "windows":
+        proxy_filename += ".exe"
+    return proxy_filename
+
+
+def _get_older_version_proxy_path(
+    install_dir: str, operating_system: str, architecture: str
+) -> str:
+    proxy_name = f"arcProxy{operating_system.capitalize()}*"
+    return os.path.join(install_dir, proxy_name)
+
+
+def _get_proxy_install_dir(arc_proxy_folder: str | None) -> str:
+    if not arc_proxy_folder:
+        return os.path.expanduser(os.path.join("~", consts.CLIENT_PROXY_FOLDER))
+    return arc_proxy_folder
+
+
+def _get_client_architecture() -> str:
+    import platform
+
+    machine = platform.machine()
+    architecture = None
+
+    logger.debug("Platform architecture: %s", machine)
+
+    if "arm64" in machine.lower() or "aarch64" in machine.lower():
+        architecture = "arm64"
+    elif machine.endswith("64"):
+        architecture = "amd64"
+    elif machine.endswith("86"):
+        architecture = "386"
+    elif machine == "":
+        raise azclierror.ClientRequestError(
+            "Couldn't identify the platform architecture."
+        )
+    else:
+        raise azclierror.ClientRequestError(
+            f"Unsupported architecture: {machine} is not currently supported"
+        )
+
+    return architecture
+
+
+def _get_client_operating_system() -> str:
+    import platform
+
+    operating_system = platform.system()
+
+    if operating_system.lower() not in ("linux", "darwin", "windows"):
+        telemetry.set_exception(
+            exception="Unsupported OS",
+            fault_type=consts.Unsupported_Fault_Type,
+            summary=f"{operating_system} is not supported yet",
+        )
+        raise azclierror.ClientRequestError(
+            f"The {operating_system} platform is not currently supported."
+        )
+    return operating_system
diff --git a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_enums.py b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_enums.py
new file mode 100644
index 00000000000..a94c5a92acd
--- /dev/null
+++ b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_enums.py
@@ -0,0 +1,22 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+from __future__ import annotations
+
+from enum import Enum
+
+
+class ProxyStatus(Enum):
+    FirstRun = 0
+    HCTokenRefresh = 1
+    AccessTokenRefresh = 2
+    AllRefresh = 3
+
+    @classmethod
+    def should_hc_token_refresh(cls, status: ProxyStatus) -> bool:
+        return status in {cls.FirstRun, cls.HCTokenRefresh, cls.AllRefresh}
+
+    @classmethod
+    def should_access_token_refresh(cls, status: ProxyStatus) -> bool:
+        return status in {cls.FirstRun, cls.AccessTokenRefresh, cls.AllRefresh}
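`ProxyStatus` encodes which token(s) a given invocation of `client_side_proxy` must refresh. A quick sketch of the resulting truth table:

```python
from azext_connectedk8s.clientproxyhelper._enums import ProxyStatus

# FirstRun (and AllRefresh) trigger both refresh paths; the dedicated
# statuses each trigger exactly one.
assert ProxyStatus.should_hc_token_refresh(ProxyStatus.FirstRun)
assert ProxyStatus.should_access_token_refresh(ProxyStatus.FirstRun)
assert ProxyStatus.should_hc_token_refresh(ProxyStatus.HCTokenRefresh)
assert not ProxyStatus.should_access_token_refresh(ProxyStatus.HCTokenRefresh)
assert not ProxyStatus.should_hc_token_refresh(ProxyStatus.AccessTokenRefresh)
assert ProxyStatus.should_access_token_refresh(ProxyStatus.AllRefresh)
```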
diff --git a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py
new file mode 100644
index 00000000000..71345064af6
--- /dev/null
+++ b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py
@@ -0,0 +1,124 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from azure.cli.core import telemetry
+
+import azext_connectedk8s._constants as consts
+import azext_connectedk8s.clientproxyhelper._utils as clientproxyutils
+
+from ..vendored_sdks.models import (
+    ListClusterUserCredentialProperties,
+)
+
+if TYPE_CHECKING:
+    from subprocess import Popen
+
+    from knack.commands import CLICommand
+    from requests.models import Response
+
+    from azext_connectedk8s.vendored_sdks.preview_2024_07_01.models import (
+        CredentialResults,
+    )
+    from azext_connectedk8s.vendored_sdks.preview_2024_07_01.operations import (
+        ConnectedClusterOperations,
+    )
+
+
+def handle_post_at_to_csp(
+    cmd: CLICommand,
+    api_server_port: int,
+    tenant_id: str,
+    clientproxy_process: Popen[bytes],
+) -> int:
+    kid = clientproxyutils.fetch_pop_publickey_kid(api_server_port, clientproxy_process)
+    post_at_response, expiry = clientproxyutils.fetch_and_post_at_to_csp(
+        cmd, api_server_port, tenant_id, kid, clientproxy_process
+    )
+
+    if post_at_response.status_code != 200:
+        if (
+            post_at_response.status_code == 500
+            and "public key expired" in post_at_response.text
+        ):
+            # Handle public key rotation
+            telemetry.set_exception(
+                exception=post_at_response.text,
+                fault_type=consts.PoP_Public_Key_Expried_Fault_Type,
+                summary="PoP public key has expired",
+            )
+            kid = clientproxyutils.fetch_pop_publickey_kid(
+                api_server_port, clientproxy_process
+            )  # Fetch rotated public key
+            # Retry posting AT with the new public key
+            post_at_response, expiry = clientproxyutils.fetch_and_post_at_to_csp(
+                cmd, api_server_port, tenant_id, kid, clientproxy_process
+            )
+        # If we still don't get a 200 after the second try, raise an error
+        if post_at_response.status_code != 200:
+            telemetry.set_exception(
+                exception=post_at_response.text,
+                fault_type=consts.Post_AT_To_ClientProxy_Failed_Fault_Type,
+                summary="Failed to post access token to client proxy",
+            )
+            clientproxyutils.close_subprocess_and_raise_cli_error(
+                clientproxy_process,
+                "Failed to post access token to client proxy" + post_at_response.text,
+            )
+
+    return expiry
+
+
+def get_cluster_user_credentials(
+    client: ConnectedClusterOperations,
+    resource_group_name: str,
+    cluster_name: str,
+    auth_method: str,
+) -> CredentialResults:
+    list_prop = ListClusterUserCredentialProperties(
+        authentication_method=auth_method, client_proxy=True
+    )
+
+    result: CredentialResults = client.list_cluster_user_credential(  # type: ignore[call-overload]
+        resource_group_name,
+        cluster_name,
+        list_prop,
+    )
+    return result
+
+
+def post_register_to_proxy(
+    data: dict[str, Any],
+    token: str | None,
+    client_proxy_port: int,
+    subscription_id: str,
+    resource_group_name: str,
+    cluster_name: str,
+    clientproxy_process: Popen[bytes],
+) -> Response:
+    if token is not None:
+        data["kubeconfigs"][0]["value"] = clientproxyutils.insert_token_in_kubeconfig(
+            data, token
+        )
+
+    uri = (
+        f"http://localhost:{client_proxy_port}/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}"
+        f"/providers/Microsoft.Kubernetes/connectedClusters/{cluster_name}/register?api-version=2020-10-01"
+    )
+
+    # Posting hybrid connection details to proxy in order to get kubeconfig
+    response = clientproxyutils.make_api_call_with_retries(
+        uri,
+        data,
+        "post",
+        False,
+        consts.Post_Hybridconn_Fault_Type,
+        "Unable to post hybrid connection details to clientproxy",
+        "Failed to pass hybrid connection details to proxy.",
+        clientproxy_process,
+    )
+    return response
diff --git a/src/connectedk8s/azext_connectedk8s/_clientproxyutils.py b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_utils.py
similarity index 84%
rename from src/connectedk8s/azext_connectedk8s/_clientproxyutils.py
rename to src/connectedk8s/azext_connectedk8s/clientproxyhelper/_utils.py
index 8d6176245dc..dcf9469d430 100644
--- a/src/connectedk8s/azext_connectedk8s/_clientproxyutils.py
+++ b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_utils.py
@@ -34,6 +34,10 @@
     from knack.commands import CLICommand
 
+    from azext_connectedk8s.vendored_sdks.preview_2024_07_01.models import (
+        CredentialResults,
+    )
+
 
 logger = get_logger(__name__)
 
@@ -128,7 +132,7 @@ def fetch_and_post_at_to_csp(
     tenant_id: str,
     kid: str,
     clientproxy_process: Popen[bytes],
-) -> requests.Response:
+) -> tuple[requests.Response, int]:
     req_cnfJSON = {"kid": kid, "xms_ksl": "sw"}
     req_cnf = base64.urlsafe_b64encode(json.dumps(req_cnfJSON).encode("utf-8")).decode(
         "utf-8"
@@ -182,7 +186,7 @@ def fetch_and_post_at_to_csp(
     )
     sys.stderr = original_stderr
 
-    return post_at_response
+    return post_at_response, accessToken.expires_on
 
 
 def insert_token_in_kubeconfig(data: dict[str, Any], token: str) -> str:
@@ -195,6 +199,26 @@ def insert_token_in_kubeconfig(data: dict[str, Any], token: str) -> str:
     return b64kubeconfig
 
 
+# Prepare data as needed by client proxy executable
+def prepare_clientproxy_data(response: CredentialResults) -> dict[str, Any]:
+    data: dict[str, Any] = {}
+    data["kubeconfigs"] = []
+    kubeconfig = {}
+    kubeconfig["name"] = "Kubeconfig"
+    kubeconfig["value"] = b64encode(response.kubeconfigs[0].value).decode("utf-8")  # type: ignore[index]
+    data["kubeconfigs"].append(kubeconfig)
+    data["hybridConnectionConfig"] = {}
+    data["hybridConnectionConfig"]["relay"] = response.hybrid_connection_config.relay  # type: ignore[attr-defined]
+    data["hybridConnectionConfig"]["hybridConnectionName"] = (
+        response.hybrid_connection_config.hybrid_connection_name  # type: ignore[attr-defined]
+    )
+    data["hybridConnectionConfig"]["token"] = response.hybrid_connection_config.token  # type: ignore[attr-defined]
+    data["hybridConnectionConfig"]["expirationTime"] = (
+        response.hybrid_connection_config.expiration_time  # type: ignore[attr-defined]
+    )
+    return data
+
+
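For reference, the payload `prepare_clientproxy_data` hands to the proxy's register endpoint has roughly this shape (all values below are fabricated placeholders, not real output):

```python
# Illustrative only -- every value here is a placeholder.
data = {
    "kubeconfigs": [
        {"name": "Kubeconfig", "value": "<base64-encoded kubeconfig>"},
    ],
    "hybridConnectionConfig": {
        "relay": "<relay endpoint>",
        "hybridConnectionName": "<hybrid connection name>",
        "token": "<relay token>",
        "expirationTime": 1700000000,  # epoch seconds; becomes hc_expiry
    },
}
```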
 def check_process(processName: str) -> bool:
     """
     Check if there is any running process that contains the given name processName.
diff --git a/src/connectedk8s/azext_connectedk8s/custom.py b/src/connectedk8s/azext_connectedk8s/custom.py
index 466c57cd5a2..7c684e527d2 100644
--- a/src/connectedk8s/azext_connectedk8s/custom.py
+++ b/src/connectedk8s/azext_connectedk8s/custom.py
@@ -14,12 +14,11 @@
 import re
 import shutil
 import stat
-import sys
 import tempfile
 import time
 import urllib.request
 from base64 import b64decode, b64encode
-from glob import glob
+from concurrent.futures import ThreadPoolExecutor
 from subprocess import DEVNULL, PIPE, Popen
 from typing import TYPE_CHECKING, Any, Iterable
 
@@ -36,7 +35,6 @@
     ManualInterrupt,
     MutuallyExclusiveArgumentError,
     RequiredArgumentMissingError,
-    UnclassifiedUserFault,
     ValidationError,
 )
 from azure.cli.core.commands import LongRunningOperation
@@ -53,16 +51,19 @@
 from kubernetes.config.kube_config import KubeConfigMerger
 from packaging import version
 
-import azext_connectedk8s._clientproxyutils as clientproxyutils
 import azext_connectedk8s._constants as consts
 import azext_connectedk8s._precheckutils as precheckutils
 import azext_connectedk8s._troubleshootutils as troubleshootutils
 import azext_connectedk8s._utils as utils
+import azext_connectedk8s.clientproxyhelper._binaryutils as proxybinaryutils
+import azext_connectedk8s.clientproxyhelper._proxylogic as proxylogic
+import azext_connectedk8s.clientproxyhelper._utils as clientproxyutils
 from azext_connectedk8s._client_factory import (
     cf_connectedmachine,
     cf_resource_groups,
     resource_providers_client,
 )
+from azext_connectedk8s.clientproxyhelper._enums import ProxyStatus
 
 from .vendored_sdks.preview_2024_07_01.models import (
     ArcAgentProfile,
@@ -71,7 +72,6 @@
     ConnectedClusterIdentity,
     ConnectedClusterPatch,
     Gateway,
-    ListClusterUserCredentialProperties,
     OidcIssuerProfile,
     SecurityProfile,
     SecurityProfileWorkloadIdentity,
@@ -84,10 +84,8 @@
     from knack.commands import CLICommmand
     from kubernetes.client import V1NodeList
     from kubernetes.config.kube_config import ConfigNode
+    from requests.models import Response
 
-    from azext_connectedk8s.vendored_sdks.preview_2024_07_01.models import (
-        CredentialResults,
-    )
     from azext_connectedk8s.vendored_sdks.preview_2024_07_01.operations import (
         ConnectedClusterOperations,
     )
@@ -2289,10 +2287,6 @@ def update_connected_cluster(
     if disable_proxy:
         helm_content_values["global.isProxyEnabled"] = "False"
 
-    # Disable proxy if disable_proxy flag is set
-    if disable_proxy:
-        helm_content_values["global.isProxyEnabled"] = "False"
-
     # Set agent version in registry path
     if connected_cluster.agent_version is not None:
         agent_version = connected_cluster.agent_version  # type: ignore[unreachable]
@@ -3271,7 +3265,7 @@ def disable_cluster_connect(
 def load_kubernetes_configuration(filename: str) -> dict[str, Any]:
     try:
         with open(filename) as stream:
-            k8s_config: dict[str, Any] = yaml.safe_load(stream)
+            k8s_config: dict[str, Any] = yaml.safe_load(stream) or {}
             return k8s_config
     except OSError as ex:
         if getattr(ex, "errno", 0) == errno.ENOENT:
@@ -3373,10 +3367,13 @@ def merge_kubernetes_configurations(
         except (KeyError, TypeError):
             continue
 
-    handle_merge(existing, addition, "clusters", replace)
-    handle_merge(existing, addition, "users", replace)
-    handle_merge(existing, addition, "contexts", replace)
-    existing["current-context"] = addition["current-context"]
+    if not existing:
+        existing = addition
+    else:
+        handle_merge(existing, addition, "clusters", replace)
+        handle_merge(existing, addition, "users", replace)
+        handle_merge(existing, addition, "contexts", replace)
+        existing["current-context"] = addition["current-context"]
 
     # check that ~/.kube/config is only read- and writable by its owner
     if platform.system() != "Windows":
@@ -3475,7 +3472,7 @@ def client_side_proxy_wrapper(
     )
 
     args = []
-    operating_system = platform.system()
+    operating_system = proxybinaryutils._get_client_operating_system()
     proc_name = f"arcProxy{operating_system}"
 
     telemetry.set_debug_info("CSP Version is ", consts.CLIENT_PROXY_VERSION)
@@ -3509,103 +3506,10 @@ def client_side_proxy_wrapper(
     if port_error_string != "":
         raise ClientRequestError(port_error_string)
 
-    # Set csp url based on cloud
-    CSP_Url = consts.CSP_Storage_Url
-    if cloud == consts.Azure_ChinaCloudName:
-        CSP_Url = consts.CSP_Storage_Url_Mooncake
-    elif cloud == consts.Azure_USGovCloudName:
-        CSP_Url = consts.CSP_Storage_Url_Fairfax
-
-    # Creating installation location, request uri and older version exe location depending on OS
-    if operating_system == "Windows":
-        install_location_string = (
-            f".clientproxy\\arcProxy{operating_system}{consts.CLIENT_PROXY_VERSION}.exe"
-        )
-        requestUri = f"{CSP_Url}/{consts.RELEASE_DATE_WINDOWS}/arcProxy{operating_system}{consts.CLIENT_PROXY_VERSION}.exe"
-        older_version_string = f".clientproxy\\arcProxy{operating_system}*.exe"
-        creds_string = r".azure\accessTokens.json"
-
-    elif operating_system == "Linux" or operating_system == "Darwin":
-        install_location_string = (
-            f".clientproxy/arcProxy{operating_system}{consts.CLIENT_PROXY_VERSION}"
-        )
-        requestUri = f"{CSP_Url}/{consts.RELEASE_DATE_LINUX}/arcProxy{operating_system}{consts.CLIENT_PROXY_VERSION}"
-        older_version_string = f".clientproxy/arcProxy{operating_system}*"
-        creds_string = r".azure/accessTokens.json"
-
-    else:
-        telemetry.set_exception(
-            exception="Unsupported OS",
-            fault_type=consts.Unsupported_Fault_Type,
-            summary=f"{operating_system} is not supported yet",
-        )
-        raise ClientRequestError(
-            f"The {operating_system} platform is not currently supported."
-        )
-
-    install_location = os.path.expanduser(os.path.join("~", install_location_string))
+    install_location = proxybinaryutils.install_client_side_proxy(None)
     args.append(install_location)
     install_dir = os.path.dirname(install_location)
 
-    # If version specified by install location doesnt exist, then download the executable
-    if not os.path.isfile(install_location):
-        print("Setting up environment for first time use. This can take few minutes...")
-        # Downloading the executable
-        try:
-            response = urllib.request.urlopen(requestUri)
-        except Exception as e:
-            telemetry.set_exception(
-                exception=e,
-                fault_type=consts.Download_Exe_Fault_Type,
-                summary="Unable to download clientproxy executable.",
-            )
-            raise CLIInternalError(
-                f"Failed to download executable with client: {e}",
-                recommendation="Please check your internet connection.",
-            )
-
-        responseContent = response.read()
-        response.close()
-
-        # Creating the .clientproxy folder if it doesnt exist
-        if not os.path.exists(install_dir):
-            try:
-                os.makedirs(install_dir)
-            except Exception as e:
-                telemetry.set_exception(
-                    exception=e,
-                    fault_type=consts.Create_Directory_Fault_Type,
-                    summary="Unable to create installation directory",
-                )
-                raise ClientRequestError(
-                    "Failed to create installation directory." + str(e)
-                )
-        else:
-            older_version_string = os.path.expanduser(
-                os.path.join("~", older_version_string)
-            )
-            older_version_files = glob(older_version_string)
-
-            # Removing older executables from the directory
-            for file_ in older_version_files:
-                try:
-                    os.remove(file_)
-                except OSError:
-                    logger.warning("failed to delete older version files")
-
-        try:
-            with open(install_location, "wb") as f:
-                f.write(responseContent)
-        except Exception as e:
-            telemetry.set_exception(
-                exception=e,
-                fault_type=consts.Create_CSPExe_Fault_Type,
-                summary="Unable to create proxy executable",
-            )
-            raise ClientRequestError("Failed to create proxy executable." + str(e))
-
-        os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR)
-
     # Creating config file to pass config to clientproxy
     config_file_location = os.path.join(install_dir, "config.yml")
 
@@ -3622,7 +3526,6 @@ def client_side_proxy_wrapper(
 
     # initializations
     user_type = "sat"
-    creds = ""
     dict_file: dict[str, Any] = {
         "server": {
             "httpPort": int(client_proxy_port),
@@ -3643,47 +3546,6 @@ def client_side_proxy_wrapper(
     else:
         dict_file["identity"]["clientID"] = account["user"]["name"]
 
-    if not utils.is_cli_using_msal_auth():
-        # Fetching creds
-        creds_location = os.path.expanduser(os.path.join("~", creds_string))
-        try:
-            with open(creds_location) as f:
-                creds_list = json.load(f)
-        except Exception as e:
-            telemetry.set_exception(
-                exception=e,
-                fault_type=consts.Load_Creds_Fault_Type,
-                summary="Unable to load accessToken.json",
-            )
-            raise FileOperationError("Failed to load credentials." + str(e))
-
-        user_name = account["user"]["name"]
-
-        if user_type == "user":
-            key = "userId"
-            key2 = "refreshToken"
-        else:
-            key = "servicePrincipalId"
-            key2 = "accessToken"
-
-        for i in range(len(creds_list)):
-            creds_obj = creds_list[i]
-
-            if key in creds_obj and creds_obj[key] == user_name:
-                creds = creds_obj[key2]
-                break
-
-        if creds == "":
-            telemetry.set_exception(
-                exception="Credentials of user not found.",
-                fault_type=consts.Creds_NotFound_Fault_Type,
-                summary="Unable to find creds of user",
-            )
-            raise UnclassifiedUserFault("Credentials of user not found.")
-
-    if user_type != "user":
-        dict_file["identity"]["clientSecret"] = creds
-
     if cloud == "DOGFOOD":
         dict_file["cloud"] = "AzureDogFood"
     elif cloud == consts.Azure_ChinaCloudName:
@@ -3738,13 +3600,9 @@ def client_side_proxy_wrapper(
         client,
         resource_group_name,
         cluster_name,
-        0,
         args,
         client_proxy_port,
         api_server_port,
-        operating_system,
-        creds,
-        user_type,
         debug_mode,
         token=token,
         path=path,
@@ -3752,89 +3610,68 @@
     )
 
 
-# Prepare data as needed by client proxy executable
-def prepare_clientproxy_data(response: CredentialResults) -> dict[str, Any]:
-    data: dict[str, Any] = {}
-    data["kubeconfigs"] = []
-    kubeconfig = {}
-    kubeconfig["name"] = "Kubeconfig"
-    kubeconfig["value"] = b64encode(response.kubeconfigs[0].value).decode("utf-8")  # type: ignore[index]
-    data["kubeconfigs"].append(kubeconfig)
-    data["hybridConnectionConfig"] = {}
-    data["hybridConnectionConfig"]["relay"] = response.hybrid_connection_config.relay  # type: ignore[attr-defined]
-    data["hybridConnectionConfig"]["hybridConnectionName"] = (
-        response.hybrid_connection_config.hybrid_connection_name  # type: ignore[attr-defined]
-    )
-    data["hybridConnectionConfig"]["token"] = response.hybrid_connection_config.token  # type: ignore[attr-defined]
-    data["hybridConnectionConfig"]["expirationTime"] = (
-        response.hybrid_connection_config.expiration_time  # type: ignore[attr-defined]
-    )
-    return data
-
-
 def client_side_proxy_main(
     cmd: CLICommmand,
     tenant_id: str,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
-    flag: int,
     args: list[str],
     client_proxy_port: int,
     api_server_port: int,
-    operating_system: str,
-    creds: str,
-    user_type: str,
     debug_mode: bool,
     token: str | None = None,
     path: str = os.path.join(os.path.expanduser("~"), ".kube", "config"),
     context_name: str | None = None,
 ) -> None:
-    expiry, clientproxy_process = client_side_proxy(
+    hc_expiry, at_expiry, clientproxy_process = client_side_proxy(
         cmd,
         tenant_id,
         client,
         resource_group_name,
         cluster_name,
-        0,
+        ProxyStatus.FirstRun,
         args,
         client_proxy_port,
         api_server_port,
-        operating_system,
-        creds,
-        user_type,
         debug_mode,
         token=token,
         path=path,
         context_name=context_name,
         clientproxy_process=None,
     )
-    next_refresh_time = expiry - consts.CSP_REFRESH_TIME
 
     while True:
         time.sleep(60)
         if clientproxyutils.check_if_csp_is_running(clientproxy_process):
-            if time.time() >= next_refresh_time:
-                expiry, clientproxy_process = client_side_proxy(
+            flag = None
+            if time.time() >= (hc_expiry - consts.CSP_REFRESH_TIME):
+                flag = ProxyStatus.HCTokenRefresh
+            elif time.time() >= (at_expiry - consts.CSP_REFRESH_TIME):
+                flag = ProxyStatus.AccessTokenRefresh
+
+            if flag is not None:
+                new_hc_expiry, new_at_expiry, clientproxy_process = client_side_proxy(
                     cmd,
                     tenant_id,
                     client,
                     resource_group_name,
                     cluster_name,
-                    1,
+                    flag,
                     args,
                     client_proxy_port,
                     api_server_port,
-                    operating_system,
-                    creds,
-                    user_type,
                     debug_mode,
                     token=token,
                     path=path,
                     context_name=context_name,
                     clientproxy_process=clientproxy_process,
                 )
-                next_refresh_time = expiry - consts.CSP_REFRESH_TIME
+                if flag == ProxyStatus.HCTokenRefresh:
+                    hc_expiry = new_hc_expiry
+                elif flag == ProxyStatus.AccessTokenRefresh:
+                    at_expiry = new_at_expiry
+
         else:
             telemetry.set_exception(
                 exception="Process closed externally.",
@@ -3850,43 +3687,36 @@ def client_side_proxy(
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
-    flag: int,
+    flag: ProxyStatus,
     args: list[str],
     client_proxy_port: int,
    api_server_port: int,
-    operating_system: str,
-    creds: str,
-    user_type: str,
     debug_mode: bool,
     token: str | None = None,
     path: str = os.path.join(os.path.expanduser("~"), ".kube", "config"),
     context_name: str | None = None,
     clientproxy_process: Popen[bytes] | None = None,
-) -> tuple[int, Popen[bytes]]:
+) -> tuple[int, int, Popen[bytes]]:
     subscription_id = get_subscription_id(cmd.cli_ctx)
     auth_method = "Token" if token is not None else "AAD"
 
+    hc_expiry, at_expiry = 0, 0
+
     # Fetching hybrid connection details from Userrp
-    try:
-        list_prop = ListClusterUserCredentialProperties(
-            authentication_method=auth_method, client_proxy=True
-        )
-        cluster_user_credentials = client.list_cluster_user_credential(
-            resource_group_name, cluster_name, list_prop
-        )
-    except Exception as e:
-        if flag == 1:
-            assert clientproxy_process is not None
-            clientproxy_process.terminate()
-        utils.arm_exception_handler(
-            e,
-            consts.Get_Credentials_Failed_Fault_Type,
-            "Unable to list cluster user credentials",
-        )
-        raise CLIInternalError(f"Failed to get credentials: {e}")
+    # We do this on a separate thread to avoid blocking the main thread,
+    # since we still need to bring up the proxy and make API calls to it.
+    if ProxyStatus.should_hc_token_refresh(flag):
+        with ThreadPoolExecutor() as executor:
+            future_get_cluster_user_credentials = executor.submit(
+                proxylogic.get_cluster_user_credentials,
+                client,
+                resource_group_name,
+                cluster_name,
+                auth_method,
+            )
 
     # Starting the client proxy process, if this is the first time that this function is invoked
-    if flag == 0:
+    if flag == ProxyStatus.FirstRun:
         try:
             if debug_mode:
                 clientproxy_process = Popen(args)
@@ -3902,96 +3732,43 @@ def client_side_proxy(
             )
             raise CLIInternalError(f"Failed to start proxy process: {e}")
 
-        # refresh token approach if cli is using ADAL auth (for cli < 2.30.0)
-        if (not utils.is_cli_using_msal_auth()) and user_type == "user":
-            identity_data = {}
-            identity_data["refreshToken"] = creds
-            identity_uri = f"https://localhost:{api_server_port}/identity/rt"
-
-            # Needed to prevent skip tls warning from printing to the console
-            original_stderr = sys.stderr
-            with open(os.devnull, "w") as f:
-                sys.stderr = f
-
-            clientproxyutils.make_api_call_with_retries(
-                identity_uri,
-                identity_data,
-                "post",
-                False,
-                consts.Post_RefreshToken_Fault_Type,
-                "Unable to post refresh token details to clientproxy",
-                "Failed to pass refresh token details to proxy.",
-                clientproxy_process,
-            )
-            sys.stderr = original_stderr
-
     assert clientproxy_process is not None
-    if token is None and (
-        utils.is_cli_using_msal_auth()
-    ):  # jwt token approach if cli is using MSAL. This is for cli >= 2.30.0
-        kid = clientproxyutils.fetch_pop_publickey_kid(
-            api_server_port, clientproxy_process
-        )
-        post_at_response = clientproxyutils.fetch_and_post_at_to_csp(
-            cmd, api_server_port, tenant_id, kid, clientproxy_process
-        )
-        if post_at_response.status_code != 200:
-            if (
-                post_at_response.status_code == 500
-                and "public key expired" in post_at_response.text
-            ):
-                # pop public key must have been rotated
-                telemetry.set_exception(
-                    exception=post_at_response.text,
-                    fault_type=consts.PoP_Public_Key_Expried_Fault_Type,
-                    summary="PoP public key has expired",
-                )
-                kid = clientproxyutils.fetch_pop_publickey_kid(
-                    api_server_port, clientproxy_process
-                )  # fetch the rotated PoP public key
-                # fetch and post the at corresponding to the new public key
-                clientproxyutils.fetch_and_post_at_to_csp(
-                    cmd, api_server_port, tenant_id, kid, clientproxy_process
-                )
-            else:
-                telemetry.set_exception(
-                    exception=post_at_response.text,
-                    fault_type=consts.Post_AT_To_ClientProxy_Failed_Fault_Type,
-                    summary="Failed to post access token to client proxy",
-                )
-                clientproxyutils.close_subprocess_and_raise_cli_error(
-                    clientproxy_process,
-                    "Failed to post access token to client proxy"
-                    + post_at_response.text,
-                )
+    if token is None and ProxyStatus.should_access_token_refresh(flag):
+        # jwt token approach if cli is using MSAL. This is for cli >= 2.30.0
+        at_expiry = proxylogic.handle_post_at_to_csp(
+            cmd, api_server_port, tenant_id, clientproxy_process
+        )
 
-    data = prepare_clientproxy_data(cluster_user_credentials)
-    expiry = data["hybridConnectionConfig"]["expirationTime"]
+    # Check hybrid connection details from Userrp
+    response: Response
 
-    if token is not None:
-        data["kubeconfigs"][0]["value"] = clientproxyutils.insert_token_in_kubeconfig(
-            data, token
-        )
+    if ProxyStatus.should_hc_token_refresh(flag):
+        try:
+            response_data = future_get_cluster_user_credentials.result()
+        except Exception as e:
+            clientproxy_process.terminate()
+            utils.arm_exception_handler(
+                e,
+                consts.Get_Credentials_Failed_Fault_Type,
+                "Unable to list cluster user credentials",
+            )
+            raise CLIInternalError(f"Failed to get credentials: {e}")
 
-    uri = (
-        f"http://localhost:{client_proxy_port}/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}"
-        f"/providers/Microsoft.Kubernetes/connectedClusters/{cluster_name}/register?api-version=2020-10-01"
-    )
+        data = clientproxyutils.prepare_clientproxy_data(response_data)
+        hc_expiry = data["hybridConnectionConfig"]["expirationTime"]
 
-    # Posting hybrid connection details to proxy in order to get kubeconfig
-    response = clientproxyutils.make_api_call_with_retries(
-        uri,
-        data,
-        "post",
-        False,
-        consts.Post_Hybridconn_Fault_Type,
-        "Unable to post hybrid connection details to clientproxy",
-        "Failed to pass hybrid connection details to proxy.",
-        clientproxy_process,
-    )
+        response = proxylogic.post_register_to_proxy(
+            data,
+            token,
+            client_proxy_port,
+            subscription_id,
+            resource_group_name,
+            cluster_name,
+            clientproxy_process,
+        )
 
-    if flag == 0:
+    if flag == ProxyStatus.FirstRun:
         # Decoding kubeconfig into a string
         try:
             kubeconfig = json.loads(response.text)
@@ -4034,7 +3811,7 @@ def client_side_proxy(
             clientproxy_process, "Failed to merge kubeconfig." + str(e)
         )
 
-    return expiry, clientproxy_process
+    return hc_expiry, at_expiry, clientproxy_process
 
 
 def check_cl_registration_and_get_oid(
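The refresh loop in `client_side_proxy_main` wakes every 60 seconds and refreshes whichever token is within `CSP_REFRESH_TIME` (300 s) of expiry, preferring the hybrid-connection token. A condensed sketch of that scheduling decision, with simplified names:

```python
import time

CSP_REFRESH_TIME = 300  # refresh five minutes before a token expires


def pick_refresh(hc_expiry: float, at_expiry: float) -> str | None:
    # Mirrors the if/elif ordering above: the hybrid-connection token
    # wins when both are close to expiry.
    now = time.time()
    if now >= hc_expiry - CSP_REFRESH_TIME:
        return "HCTokenRefresh"
    if now >= at_expiry - CSP_REFRESH_TIME:
        return "AccessTokenRefresh"
    return None
```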
diff --git a/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py b/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py
index cca97a6ef15..0d6c2377041 100644
--- a/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py
+++ b/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py
@@ -43,8 +43,10 @@
     CONFIG = json.load(f)
     for key in CONFIG:
         if not CONFIG[key]:
-            raise RequiredArgumentMissingError(f"Missing required configuration in {config_path} file. Make sure all \
-properties are populated.")
+            raise RequiredArgumentMissingError(
+                f"Missing required configuration in {config_path} file. Make sure all \
+properties are populated."
+            )
 
 
 def _get_test_data_file(filename):
@@ -148,8 +150,10 @@ def install_kubectl_client():
         elif operating_system == "linux" or operating_system == "darwin":
             kubectl_path = os.path.join(kubectl_filepath, "kubectl")
         else:
-            logger.warning(f"The {operating_system} platform is not currently supported for installing kubectl \
-client.")
+            logger.warning(
+                f"The {operating_system} platform is not currently supported for installing kubectl \
+client."
+            )
             return
 
         if os.path.isfile(kubectl_path):
@@ -712,8 +716,10 @@ def test_upgrade(self, resource_group):
             "connectedk8s upgrade -g {rg} -n {name} --kube-config {kubeconfig} --kube-context \
 {managed_cluster_name}-admin"
         )
-        response = requests.post(f'https://{CONFIG["location"]}.dp.kubernetesconfiguration.azure.com/azure-\
-            arc-k8sagents/GetLatestHelmPackagePath?api-version=2019-11-01-preview&releaseTrain=stable')
+        response = requests.post(
+            f'https://{CONFIG["location"]}.dp.kubernetesconfiguration.azure.com/azure-\
+            arc-k8sagents/GetLatestHelmPackagePath?api-version=2019-11-01-preview&releaseTrain=stable'
+        )
         jsonData = json.loads(response.text)
         repo_path = jsonData["repositoryPath"]
         index_value = 0
diff --git a/src/connectedk8s/setup.py b/src/connectedk8s/setup.py
index a190ea577a4..6c86af143d7 100644
--- a/src/connectedk8s/setup.py
+++ b/src/connectedk8s/setup.py
@@ -13,7 +13,7 @@
 
 # TODO: Confirm this is the right version number you want and it matches your
 # HISTORY.rst entry.
-VERSION = "1.10.3"
+VERSION = "1.10.4"
 
 # The full list of classifiers is available at
 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
@@ -34,6 +34,7 @@
     "kubernetes==24.2.0",
     "pycryptodome==3.20.0",
     "azure-mgmt-hybridcompute==7.0.0",
+    "oras==0.2.25",
 ]
 
 with open("README.md", "r", encoding="utf-8") as f:
diff --git a/testing/.gitignore b/testing/.gitignore
new file mode 100644
index 00000000000..29f33294b8b
--- /dev/null
+++ b/testing/.gitignore
@@ -0,0 +1,9 @@
+settings.json
+tmp/
+bin/*
+!bin/connectedk8s-1.0.0-py3-none-any.whl
+!bin/k8s_extension-0.3.0-py3-none-any.whl
+!bin/k8s_extension_private-0.1.0-py3-none-any.whl
+!bin/k8s_configuration-1.0.0-py3-none-any.whl
+!bin/connectedk8s-values.yaml
+*.xml
\ No newline at end of file
diff --git a/testing/Bootstrap.ps1 b/testing/Bootstrap.ps1
new file mode 100644
index 00000000000..ad21cfddad2
--- /dev/null
+++ b/testing/Bootstrap.ps1
@@ -0,0 +1,30 @@
+param (
+    [switch] $SkipInstall,
+    [switch] $CI
+)
+
+# Disable confirm prompt for script
+az config set core.disable_confirm_prompt=true
+
+# Configuring the environment
+$ENVCONFIG = Get-Content -Path $PSScriptRoot/settings.json | ConvertFrom-Json
+
+az account set --subscription $ENVCONFIG.subscriptionId
+
+if (-not (Test-Path -Path $PSScriptRoot/tmp)) {
+    New-Item -ItemType Directory -Path $PSScriptRoot/tmp
+}
+
+az group show --name $ENVCONFIG.resourceGroup
+if (!$?) {
+    Write-Host "Resource group does not exist, creating it now in region 'eastus2euap'"
+    az group create --name $ENVCONFIG.resourceGroup --location eastus2euap
+
+    if (!$?) {
+        Write-Host "Failed to create Resource Group - exiting!"
+        Exit 1
+    }
+}
+
+
+Copy-Item $HOME/.kube/config -Destination $PSScriptRoot/tmp/KUBECONFIG
\ No newline at end of file
diff --git a/testing/README.md b/testing/README.md
new file mode 100644
index 00000000000..33f12b5b1a3
--- /dev/null
+++ b/testing/README.md
@@ -0,0 +1,116 @@
+# K8s Partner Extension Test Suite
+
+This repository serves as the integration testing suite for the `k8s-extension` Azure CLI module.
+
+## Testing Requirements
+
+All partners who wish to merge their __Custom Private Preview Release__ (owner: _Partner_) into the __Official Private Preview Release__ are required to author additional integration tests for their extension to ensure that their extension will continue to function correctly as more extensions are added to the __Official Private Preview Release__.
+
+For more information on creating these tests, see [Authoring Tests](docs/test_authoring.md)
+
+## Pre-Requisites
+
+In order to run all of the regression tests in the test suite, you must onboard an AKS cluster, which you will use to generate the Azure Arc resource that the extensions are tested against. Ensure that you have a resource group where you can onboard this cluster.
+
+### Required Installations
+
+The following installations are required in your environment for the integration tests to run correctly:
+
+1. [Helm 3](https://helm.sh/docs/intro/install/)
+2. [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+3. [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)
+
+## Setup
+
+### Step 1: Install Pester
+
+This project contains [Pester](https://pester.dev/) test framework commands that are required for the integration tests to run. In an admin PowerShell terminal, run
+
+```powershell
+Install-Module Pester -Force -SkipPublisherCheck
+Import-Module Pester -PassThru
+```
+
+If you run into issues installing the framework, refer to the [Installation Guide](https://pester.dev/docs/introduction/installation) provided by the Pester docs.
+
+### Step 2: Get the test suite files
+
+You can either clone this repo (the preferred option, since you will be adding your tests to this suite) or copy the files in this repo locally. The rest of the instructions assume your working directory is k8spartner-extension-testing.
+
+### Step 3: Update the `k8s-extension`/`k8s-extension-private` .whl package
+
+This integration test suite references the .whl packages found in the `\bin` directory. After generating your `k8s-extension`/`k8s-extension-private` .whl package, copy your updated package into the `\bin` directory.
+
+### Step 4: Create a `settings.json`
+
+To onboard the AKS and Arc clusters correctly, you will need to create a `settings.json` configuration. Create a new `settings.json` file by copying the contents of `settings.template.json` into it. Update the subscription id, resource group, and AKS and Arc cluster name fields with your specific values.
+
+### Step 5: Update the extension version value in `settings.json`
+
+To ensure that the tests point to your `k8s-extension-private` `.whl` package, change the value of `k8s-extension-private` to match your package version in the format (Major.Minor.Patch.Extension). For example, the `k8s_extension_private-0.1.0.openservicemesh_5-py3-none-any.whl` whl package would have the extension versions set to
+```json
+{
+    "k8s-extension": "0.1.0",
+    "k8s-extension-private": "0.1.0.openservicemesh_5",
+    "connectedk8s": "0.3.5"
+}
+
+```
+
+_Note: Updates to the `connectedk8s` version and `k8s-extension` version can also be made by adding a different version of the `connectedk8s` and `k8s-extension` whl packages and changing the `connectedk8s` and `k8s-extension` values to match the (Major.Minor.Patch) version format shown above_
+
+### Step 6: Run the Bootstrap Command
+To bootstrap the environment with AKS and Arc clusters, run
+```powershell
+.\Bootstrap.ps1
+```
+This script will provision the AKS and Arc clusters needed to run the integration test suite
+
+## Testing
+
+### Testing All Extension Suites
+To test all extension test suites, you must call `.\Test.ps1` with the `-ExtensionType` parameter set to either `Public` or `Private`. Based on this flag, the test suite will install the extension type specified below
+
+| `-ExtensionType` | Installs `az extension`  |
+| ---------------- | ------------------------ |
+| `Public`         | `k8s-extension`          |
+| `Private`        | `k8s-extension-private`  |
+
+For example, when calling
+```powershell
+.\Test.ps1 -ExtensionType Public
+```
+the script will install your `k8s-extension` whl package and run the full test suite of `*.Tests.ps1` files included in the `\test\extensions` directory
+
+### Testing Public Extensions Only
+If you only want to run the test cases against public-preview or GA extension test cases, you can use the `-OnlyPublicTests` flag to specify this
+```powershell
+.\Test.ps1 -ExtensionType Public -OnlyPublicTests
+```
+
+### Testing Specific Extension Suite
+
+If you only want to run the test script on your specific test file, you can do so by specifying the path to your extension test suite in the execution call
+
+```powershell
+.\Test.ps1 -Path <path to your extension test suite>
+```
+For example, to call the `AzureMonitor.Tests.ps1` test suite, we run
+```powershell
+.\Test.ps1 -ExtensionType Public -Path .\test\extensions\public\AzureMonitor.Tests.ps1
+```
+
+### Skipping Extension Re-Install
+
+By default the `Test.ps1` script will uninstall any old versions of `k8s-extension`/`k8s-extension-private` and re-install the version specified in `settings.json`. If you do not want this re-installation to occur, you can specify the `-SkipInstall` flag to skip this process.
+
+```powershell
+.\Test.ps1 -ExtensionType Public -SkipInstall
+```
+
+## Cleanup
+To cleanup the AKS and Arc clusters you have provisioned in testing, run
+```powershell
+.\Cleanup.ps1
+```
+This will remove the AKS and Arc clusters as well as the `\tmp` directory that were created by the bootstrapping script.
\ No newline at end of file
diff --git a/testing/Test.ps1 b/testing/Test.ps1
new file mode 100644
index 00000000000..1f9fc5481f0
--- /dev/null
+++ b/testing/Test.ps1
@@ -0,0 +1,95 @@
+param (
+    [string] $Path,
+    [switch] $SkipInstall,
+    [switch] $CI,
+    [switch] $ParallelCI,
+    [switch] $OnlyPublicTests,
+
+    [Parameter(Mandatory=$True)]
+    [ValidateSet('connectedk8s')]
+    [string]$Type
+)
+
+# Disable confirm prompt for script
+# Only show errors, don't show warnings
+az config set core.disable_confirm_prompt=true
+az config set core.only_show_errors=true
+
+$ENVCONFIG = Get-Content -Path $PSScriptRoot/settings.json | ConvertFrom-Json
+
+az account set --subscription $ENVCONFIG.subscriptionId
+
+$Env:KUBECONFIG="$PSScriptRoot/tmp/KUBECONFIG"
+$TestFileDirectory="$PSScriptRoot/results"
+
+if (-not (Test-Path -Path $TestFileDirectory)) {
+    New-Item -ItemType Directory -Path $TestFileDirectory
+}
+
+if ($Type -eq 'connectedk8s') {
+    $connectedk8sVersion = $ENVCONFIG.extensionVersion.'connectedk8s'
+    if (!$SkipInstall) {
+        Write-Host "Removing the old connectedk8s extension..."
+        az extension remove -n connectedk8s
+        Write-Host "Installing connectedk8s version $connectedk8sVersion..."
+        az extension add --source ./bin/connectedk8s-$connectedk8sVersion-py2.py3-none-any.whl
+    }
+    $testFilePaths = "$PSScriptRoot/test/configurations"
+}
+
+if ($ParallelCI) {
+    # This runs the tests in parallel during the CI pipeline to speed up testing
+
+    Write-Host "Invoking Pester to run tests from '$testFilePaths'..."
+    $testFiles = @()
+    foreach ($paths in $testFilePaths)
+    {
+        $temp = Get-ChildItem $paths
+        $testFiles += $temp
+    }
+    $resultFileNumber = 0
+    foreach ($testFile in $testFiles)
+    {
+        $resultFileNumber++
+        $testName = Split-Path $testFile -Leaf
+        Start-Job -ArgumentList $testName, $testFile, $resultFileNumber, $TestFileDirectory -Name $testName -ScriptBlock {
+            param($name, $testFile, $resultFileNumber, $testFileDirectory)
+
+            Write-Host "$testFile to result file #$resultFileNumber"
+            $testResult = Invoke-Pester $testFile -Passthru -Output Detailed
+            $testResult | Export-JUnitReport -Path "$testFileDirectory/$name.xml"
+        }
+    }
+
+    do {
+        Write-Host ">> Still running tests @ $(Get-Date -Format "HH:mm:ss")" -ForegroundColor Blue
+        Get-Job | Where-Object { $_.State -eq "Running" } | Format-Table -AutoSize
+        Start-Sleep -Seconds 30
+    } while((Get-Job | Where-Object { $_.State -eq "Running" } | Measure-Object).Count -ge 1)
+
+    Get-Job | Wait-Job
+    $failedJobs = Get-Job | Where-Object { -not ($_.State -eq "Completed")}
+    Get-Job | Receive-Job -AutoRemoveJob -Wait -ErrorAction 'Continue'
+
+    if ($failedJobs.Count -gt 0) {
+        Write-Host "Failed Jobs" -ForegroundColor Red
+        $failedJobs
+        throw "One or more tests failed"
+    }
+} elseif ($CI) {
+    if ($Path) {
+        $testFilePath = "$PSScriptRoot/$Path"
+    }
+    Write-Host "Invoking Pester to run tests from '$testFilePath'..."
+    $testResult = Invoke-Pester $testFilePath -Passthru -Output Detailed
+    $testName = Split-Path $testFilePath -Leaf
+    $testResult | Export-JUnitReport -Path "$TestFileDirectory/$testName.xml"
+} else {
+    if ($Path) {
+        Write-Host "Invoking Pester to run tests from '$PSScriptRoot/$Path'"
+        Invoke-Pester -Output Detailed $PSScriptRoot/$Path
+    } else {
+        Write-Host "Invoking Pester to run tests from '$testFilePath'..."
+ Invoke-Pester -Output Detailed $testFilePath + } +} \ No newline at end of file diff --git a/testing/bin/connectedk8s-1.0.0-py3-none-any.whl b/testing/bin/connectedk8s-1.0.0-py3-none-any.whl new file mode 100644 index 00000000000..08f34250036 Binary files /dev/null and b/testing/bin/connectedk8s-1.0.0-py3-none-any.whl differ diff --git a/testing/bin/connectedk8s-values.yaml b/testing/bin/connectedk8s-values.yaml new file mode 100644 index 00000000000..35716eb9ba1 --- /dev/null +++ b/testing/bin/connectedk8s-values.yaml @@ -0,0 +1,3 @@ +systemDefaultValues: + extensionoperator: + enabled: true \ No newline at end of file diff --git a/testing/bin/k8s_configuration-1.0.0-py3-none-any.whl b/testing/bin/k8s_configuration-1.0.0-py3-none-any.whl new file mode 100644 index 00000000000..cc8e8e0995f Binary files /dev/null and b/testing/bin/k8s_configuration-1.0.0-py3-none-any.whl differ diff --git a/testing/bin/k8s_extension-0.3.0-py3-none-any.whl b/testing/bin/k8s_extension-0.3.0-py3-none-any.whl new file mode 100644 index 00000000000..feb28b80b43 Binary files /dev/null and b/testing/bin/k8s_extension-0.3.0-py3-none-any.whl differ diff --git a/testing/owners.txt b/testing/owners.txt new file mode 100644 index 00000000000..c1bbe9a9e5c --- /dev/null +++ b/testing/owners.txt @@ -0,0 +1,2 @@ +joinnis +nanthi \ No newline at end of file diff --git a/testing/pipeline/k8s-custom-pipelines.yml b/testing/pipeline/k8s-custom-pipelines.yml new file mode 100644 index 00000000000..6d647bd87b3 --- /dev/null +++ b/testing/pipeline/k8s-custom-pipelines.yml @@ -0,0 +1,334 @@ +resources: +- repo: self + +trigger: + batch: true + branches: + include: + - 'main' + +pr: + branches: + include: + - '*' + +stages: +- stage: BuildTestPublishExtension + displayName: "Build, Test, and Publish Extension" + variables: + TEST_PATH: $(Agent.BuildDirectory)/s/testing + CLI_REPO_PATH: $(Agent.BuildDirectory)/s + EXTENSION_NAME: "connectedk8s" + EXTENSION_FILE_NAME: "connectedk8s" + SUBSCRIPTION_ID: "15c06b1b-01d6-407b-bb21-740b8617dea3" + RESOURCE_GROUP: "K8sPartnerExtensionTest" + BASE_CLUSTER_NAME: "connectedk8s-cluster" + jobs: + - template: ./templates/run-test.yml + parameters: + jobName: BasicOnboardingTest + path: ./test/configurations/BasicOnboarding.Tests.ps1 + - template: ./templates/run-test.yml + parameters: + jobName: AutoUpdateTest + path: ./test/configurations/AutoUpdate.Tests.ps1 + - template: ./templates/run-test.yml + parameters: + jobName: ProxyTest + path: ./test/configurations/Proxy.Tests.ps1 + - template: ./templates/run-test.yml + parameters: + jobName: GatewayTest + path: ./test/configurations/Gateway.Tests.ps1 + - template: ./templates/run-test.yml + parameters: + jobName: WorkloadIdentityTest + path: ./test/configurations/WorkloadIdentity.Tests.ps1 + - template: ./templates/run-test.yml + parameters: + jobName: TroubleshootTest + path: ./test/configurations/Troubleshoot.Tests.ps1 + - template: ./templates/run-test.yml + parameters: + jobName: Connectedk8sProxyTest + path: ./test/configurations/ConnectProxy.Tests.ps1 + - job: BuildPublishExtension + pool: + vmImage: 'ubuntu-20.04' + displayName: "Build and Publish the Extension Artifact" + variables: + CLI_REPO_PATH: $(Agent.BuildDirectory)/s + EXTENSION_NAME: "connectedk8s" + steps: + - task: UsePythonVersion@0 + displayName: 'Use Python 3.6' + inputs: + versionSpec: 3.6 + - bash: | + set -ev + echo "Building extension ${EXTENSION_NAME}..." 
+
+        # prepare and activate virtualenv
+        pip install virtualenv
+        python3 -m venv env/
+        source env/bin/activate
+
+        # install azdev (no azure-cli clone is needed here; the repo checkout is used directly)
+        pip install --upgrade pip
+        pip install azdev
+
+        ls $(CLI_REPO_PATH)
+
+        azdev --version
+        azdev setup -r $(CLI_REPO_PATH) -e $(EXTENSION_NAME)
+        azdev extension build $(EXTENSION_NAME)
+      workingDirectory: $(CLI_REPO_PATH)
+      displayName: "Setup and Build Extension with azdev"
+    - task: PublishBuildArtifacts@1
+      inputs:
+        pathToPublish: $(CLI_REPO_PATH)/dist
+
+- stage: AzureCLIOfficial
+  displayName: "Azure Official CLI Code Checks"
+  dependsOn: []
+  jobs:
+  - job: CheckLicenseHeader
+    displayName: "Check License"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.10'
+      inputs:
+        versionSpec: '3.10'
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        python -m venv env/
+
+        chmod +x ./env/bin/activate
+        source ./env/bin/activate
+
+        # clone azure-cli
+        git clone -q --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli
+
+        pip install --upgrade pip
+        pip install -q azdev
+
+        azdev setup -c ../azure-cli -r ./
+
+        azdev --version
+        az --version
+
+        azdev verify license
+
+  - job: IndexVerify
+    displayName: "Verify Extensions Index"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.10'
+      inputs:
+        versionSpec: '3.10'
+    - bash: |
+        #!/usr/bin/env bash
+        set -ev
+        pip install wheel==0.30.0 requests packaging
+        export CI="ADO"
+        python ./scripts/ci/test_index.py -v
+      displayName: "Verify Extensions Index"
+
+  - job: SourceTests
+    displayName: "Integration Tests, Build Tests"
+    pool:
+      vmImage: 'ubuntu-latest'
+    strategy:
+      matrix:
+        Python39:
+          python.version: '3.9'
+        Python310:
+          python.version: '3.10'
+        Python311:
+          python.version: '3.11'
+        Python312:
+          python.version: '3.12'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python $(python.version)'
+      inputs:
+        versionSpec: '$(python.version)'
+    - bash: pip install wheel==0.30.0
+      displayName: 'Install wheel==0.30.0'
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        pip install virtualenv
+        python -m virtualenv venv/
+        source ./venv/bin/activate
+
+        # clone azure-cli
+        git clone --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli
+
+        pip install --upgrade pip
+        pip install azdev
+
+        azdev --version
+
+        azdev setup -c ../azure-cli -r ./ -e connectedk8s
+        azdev test connectedk8s
+      displayName: 'Run integration test and build test'
+
+  - job: AzdevLinterModifiedExtensions
+    displayName: "azdev linter on Modified Extensions"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        pip install virtualenv
+        python -m virtualenv venv/
+        source ./venv/bin/activate
+
+        # clone azure-cli
+        git clone --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli
+
+        pip install --upgrade pip
+        pip install azdev
+
+        azdev --version
+
+        azdev setup -c ../azure-cli -r ./ -e connectedk8s
+        # setuptools releases above 70.0.0 no longer generate metadata.json, so pin to 70.0.0
+        pip install setuptools==70.0.0
+        pip list -v
+
+        # overwrite the default AZURE_EXTENSION_DIR set by ADO
+        AZURE_EXTENSION_DIR=~/.azure/cliextensions az --version
+
+        AZURE_EXTENSION_DIR=~/.azure/cliextensions azdev linter --include-whl-extensions connectedk8s
+      displayName: "CLI Linter on Modified Extension"
+      env:
+        ADO_PULL_REQUEST_LATEST_COMMIT: $(System.PullRequest.SourceCommitId)
+        ADO_PULL_REQUEST_TARGET_BRANCH: $(System.PullRequest.TargetBranch)
+
+  - job: AzdevStyleModifiedExtensions
+    displayName: "azdev style on Modified Extensions"
+    continueOnError: true
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        python -m venv env
+        chmod +x env/bin/activate
+        source ./env/bin/activate
+
+        # clone azure-cli
+        git clone -q --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli
+
+        pip install --upgrade pip
+        pip install azdev
+
+        azdev --version
+
+        azdev setup -c ../azure-cli -r ./ -e connectedk8s
+        # setuptools releases above 70.0.0 no longer generate metadata.json, so pin to 70.0.0
+        pip install setuptools==70.0.0
+        pip list -v
+        az --version
+
+        # overwrite the default AZURE_EXTENSION_DIR set by ADO
+        AZURE_EXTENSION_DIR=~/.azure/cliextensions az --version
+
+        AZURE_EXTENSION_DIR=~/.azure/cliextensions azdev style connectedk8s
+      displayName: "azdev style on Modified Extensions"
+      env:
+        ADO_PULL_REQUEST_LATEST_COMMIT: $(System.PullRequest.SourceCommitId)
+        ADO_PULL_REQUEST_TARGET_BRANCH: $(System.PullRequest.TargetBranch)
+
+  - job: RuffCheck
+    displayName: "Lint connectedk8s with ruff check"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        cd src/connectedk8s
+        python -m venv env
+        source ./env/bin/activate
+
+        pip install --upgrade pip
+        pip install azure-cli --editable .[linting]
+
+        ruff check
+
+      displayName: "ruff check"
+
+  - job: RuffFormat
+    displayName: "Check connectedk8s formatting with ruff"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        cd src/connectedk8s
+        python -m venv env
+        source ./env/bin/activate
+
+        pip install --upgrade pip
+        pip install azure-cli --editable .[linting]
+
+        ruff format --check
+
+      displayName: "ruff format check"
+
+  - job: TypeChecking
+    displayName: "Typecheck connectedk8s with mypy"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        cd src/connectedk8s
+        python -m venv env
+        source ./env/bin/activate
+
+        pip install --upgrade pip
+        pip install azure-cli --editable .[linting]
+
+        mypy
+
+      displayName: "mypy"
diff --git a/testing/pipeline/templates/run-test.yml b/testing/pipeline/templates/run-test.yml
new file mode 100644
index 00000000000..f1d42ae9714
--- /dev/null
+++ b/testing/pipeline/templates/run-test.yml
@@ -0,0 +1,112 @@
+parameters:
+  jobName: ''
+  path: ''
+
+jobs:
+- job: ${{ parameters.jobName }}
+  pool:
+    vmImage: 'ubuntu-20.04'
+  steps:
+  - bash: |
+      echo "Installing helm3"
+      curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
+      chmod 700 get_helm.sh
+      ./get_helm.sh --version v3.6.3
+      echo "Installing kubectl"
+      curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
+      chmod +x ./kubectl
+      sudo mv ./kubectl /usr/local/bin/kubectl
+      kubectl version --client
+    displayName: "Setup the VM with helm3 and kubectl"
+
+  - task: UsePythonVersion@0
+    displayName: 'Use Python 3.6'
+    inputs:
+      versionSpec: 3.6
+
+  - bash: |
+      set -ev
+      echo "Building extension ${EXTENSION_NAME}..."
+      # prepare and activate virtualenv
+      pip install virtualenv
+      python3 -m venv env/
+      source env/bin/activate
+      # clone azure-cli
+      git clone -q --single-branch -b dev https://github.com/Azure/azure-cli.git ../azure-cli
+      pip install --upgrade pip
+      pip install -q azdev
+      ls $(CLI_REPO_PATH)
+      azdev --version
+      azdev setup -c ../azure-cli -r $(CLI_REPO_PATH) -e $(EXTENSION_NAME)
+      azdev extension build $(EXTENSION_NAME)
+    workingDirectory: $(CLI_REPO_PATH)
+    displayName: "Setup and Build Extension with azdev"
+
+  - bash: |
+      K8S_CONFIG_VERSION=$(ls ${EXTENSION_FILE_NAME}* | cut -d "-" -f2)
+      echo "##vso[task.setvariable variable=K8S_CONFIG_VERSION]$K8S_CONFIG_VERSION"
+      cp * $(TEST_PATH)/bin
+    workingDirectory: $(CLI_REPO_PATH)/dist
+    displayName: "Copy the Built .whl to Extension Test Path"
+
+  - bash: |
+      RAND_STR=$RANDOM
+      AKS_CLUSTER_NAME="${BASE_CLUSTER_NAME}-${RAND_STR}-aks"
+      ARC_CLUSTER_NAME="${BASE_CLUSTER_NAME}-${RAND_STR}-arc"
+
+      JSON_STRING=$(jq -n \
+        --arg SUB_ID "$SUBSCRIPTION_ID" \
+        --arg RG "$RESOURCE_GROUP" \
+        --arg AKS_CLUSTER_NAME "$AKS_CLUSTER_NAME" \
+        --arg ARC_CLUSTER_NAME "$ARC_CLUSTER_NAME" \
+        --arg K8S_CONFIG_VERSION "$K8S_CONFIG_VERSION" \
+        '{subscriptionId: $SUB_ID, resourceGroup: $RG, aksClusterName: $AKS_CLUSTER_NAME, arcClusterName: $ARC_CLUSTER_NAME, extensionVersion: {"connectedk8s": $K8S_CONFIG_VERSION}}')
+      echo $JSON_STRING > settings.json
+      cat settings.json
+    workingDirectory: $(TEST_PATH)
+    displayName: "Generate a settings.json file"
+
+  - bash: |
+      echo "Downloading the kind script"
+      curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.24.0/kind-linux-amd64
+      chmod +x ./kind
+      ./kind create cluster
+    displayName: "Create and Start the Kind cluster"
+
+  - bash: |
+      curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
+    displayName: "Upgrade az to latest version"
+
+  - bash: |
+      curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
+      chmod 700 get_helm.sh
+      ./get_helm.sh --version v3.6.3
+    displayName: "Install Helm"
+
+  - task: AzureCLI@2
+    displayName: Bootstrap
+    inputs:
+      azureSubscription: AzureResourceConnection
+      scriptType: pscore
+      scriptLocation: inlineScript
+      inlineScript: |
+        .\Bootstrap.ps1 -CI
+      workingDirectory: $(TEST_PATH)
+
+  - task: AzureCLI@2
+    displayName: Run the Test Suite for ${{ parameters.path }}
+    inputs:
+      azureSubscription: AzureResourceConnection
+      scriptType: pscore
+      scriptLocation: inlineScript
+      inlineScript: |
+        .\Test.ps1 -CI -Path ${{ parameters.path }} -Type connectedk8s
+      workingDirectory: $(TEST_PATH)
+    continueOnError: true
+
+  - task: PublishTestResults@2
+    inputs:
+      testResultsFormat: 'JUnit'
+      testResultsFiles: '**/testing/results/*.xml'
+      failTaskOnFailedTests: true
+    condition: succeededOrFailed()
diff --git a/testing/settings.template.json b/testing/settings.template.json
new file mode 100644
index 00000000000..657126c20aa
--- /dev/null
+++ b/testing/settings.template.json
@@ -0,0 +1,12 @@
+{
+    "subscriptionId": "",
+    "resourceGroup": "",
+    "aksClusterName": "",
+    "arcClusterName": "",
+
+    "extensionVersion": {
+        "k8s-extension": "0.3.0",
+        "k8s-extension-private": "0.1.0",
+        "connectedk8s": "1.0.0"
+    }
+}
\ No newline at end of file
diff --git a/testing/test/configurations/AutoUpdate.Tests.ps1 b/testing/test/configurations/AutoUpdate.Tests.ps1
new file mode 100644
index 00000000000..d55029ceeb8 --- /dev/null +++ b/testing/test/configurations/AutoUpdate.Tests.ps1 @@ -0,0 +1,62 @@ +Describe 'Auto Upgrade Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Check if basic onboarding works with auto-upgrade disabled' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --disable-auto-upgrade --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $autoUpdate = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentAutoUpgrade").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Auto Update: $autoUpdate" + if ($provisioningState -eq $SUCCEEDED -and $autoUpdate -eq "Disabled") { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Enable auto-upgrade using update cmd' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade true + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $autoUpdate = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentAutoUpgrade").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Auto Update: $autoUpdate" + if ($provisioningState -eq $SUCCEEDED -and $autoUpdate -eq "Enabled") { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/BasicOnboarding.Tests.ps1 b/testing/test/configurations/BasicOnboarding.Tests.ps1 new file mode 100644 index 00000000000..541327682c0 --- /dev/null +++ b/testing/test/configurations/BasicOnboarding.Tests.ps1 @@ -0,0 +1,62 @@ +Describe 'Basic Onboarding Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Check if basic onboarding works correctly' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --no-wait + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $autoUpdate = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentAutoUpgrade").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Auto Update: $autoUpdate" + if ($provisioningState -eq $SUCCEEDED -and $autoUpdate -eq "Enabled") { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Disable auto-upgrade' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --auto-upgrade false + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $autoUpdate = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentAutoUpgrade").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Auto Update: $autoUpdate" + if ($provisioningState -eq $SUCCEEDED -and $autoUpdate -eq "Disabled") { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/ConnectProxy.Tests.ps1 b/testing/test/configurations/ConnectProxy.Tests.ps1 new file mode 100644 index 00000000000..3e6f8e06899 --- /dev/null +++ b/testing/test/configurations/ConnectProxy.Tests.ps1 @@ -0,0 +1,62 @@ +Describe 'Connectedk8s Proxy Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Check if basic onboarding works correctly' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --no-wait + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $provisioningState = ($output | ConvertFrom-Json).provisioningState + Write-Host "Provisioning State: $provisioningState" + if ($provisioningState -eq $SUCCEEDED) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Connectedk8s proxy test with non-empty kubeconfig' { + # Start the proxy command as a background job + $proxyJob = Start-Job -ScriptBlock { + param($ClusterName, $ResourceGroup) + + # Capture output and errors + try { + $output = az connectedk8s proxy -n $ClusterName -g $ResourceGroup 2>&1 + return @{ Success = $LASTEXITCODE -eq 0; Output = $output } + } catch { + return @{ Success = $false; Output = $_.Exception.Message } + } + } -ArgumentList $ENVCONFIG.arcClusterName, $ENVCONFIG.resourceGroup + + # Wait for a certain amount of time (e.g., 30 seconds) + Start-Sleep -Seconds 30 + + # Display the output + Write-Host "Proxy Job State: $($proxyJob.State)" + + # Check if the job ran successfully + $proxyJob.State | Should -Be 'Running' + + Stop-Job -Job $proxyJob + Remove-Job -Job $proxyJob + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/Gateway.Tests.ps1 b/testing/test/configurations/Gateway.Tests.ps1 new file mode 100644 index 00000000000..37dab0eccc9 --- /dev/null +++ b/testing/test/configurations/Gateway.Tests.ps1 @@ -0,0 +1,116 @@ +Describe 'Onboarding with Gateway Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + + $gatewayResourceId = "/subscriptions/15c06b1b-01d6-407b-bb21-740b8617dea3/resourceGroups/connectedk8sCLITestResources/providers/Microsoft.HybridCompute/gateways/gateway-test-cli" + } + + It 'Check if onboarding works with gateway enabled' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --gateway-resource-id $gatewayResourceId --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $gatewayStatus = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("enabled").GetBoolean() + $gatewayId = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("resourceId").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Gateway Status: $gatewayStatus" + Write-Host "Gateway Id: $gatewayId" + if ($provisioningState -eq $SUCCEEDED -and $gatewayStatus -eq $true -and $gatewayId -eq $gatewayResourceId) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Disable the gateway' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --disable-gateway + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $gatewayStatus = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("enabled").GetBoolean() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Gateway Status: $gatewayStatus" + if ($provisioningState -eq $SUCCEEDED -and $gatewayStatus -eq $false) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Update the cluster to use gateway again using update cmd' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --gateway-resource-id $gatewayResourceId + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $gatewayStatus = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("enabled").GetBoolean() + $gatewayId = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("resourceId").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Gateway Status: $gatewayStatus" + Write-Host "Gateway Id: $gatewayId" + if ($provisioningState -eq $SUCCEEDED -and $gatewayStatus -eq $true -and $gatewayId -eq $gatewayResourceId) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Disable the gateway' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --disable-gateway + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $gatewayStatus = $jsonOutput.RootElement.GetProperty("gateway").GetProperty("enabled").GetBoolean() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Gateway Status: $gatewayStatus" + if ($provisioningState -eq $SUCCEEDED -and $gatewayStatus -eq $false) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/Proxy.Tests.ps1 b/testing/test/configurations/Proxy.Tests.ps1 new file mode 100644 index 00000000000..bda7b06e4bc --- /dev/null +++ b/testing/test/configurations/Proxy.Tests.ps1 @@ -0,0 +1,65 @@ +Describe 'Proxy Scenario' { + BeforeAll { + . 
$PSScriptRoot/../helper/Constants.ps1
+    }
+
+    It 'Check if basic onboarding works correctly with proxy enabled' {
+        az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --proxy-skip-range logcollector --no-wait
+        $? | Should -BeTrue
+        Start-Sleep -Seconds 10
+
+        # Loop and retry until the configuration installs
+        $n = 0
+        do
+        {
+            $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+            $provisioningState = ($output | ConvertFrom-Json).provisioningState
+            Write-Host "Provisioning State: $provisioningState"
+            if ($provisioningState -eq $SUCCEEDED) {
+                $isProxyEnabled = helm get values -n azure-arc-release azure-arc -o yaml | grep isProxyEnabled
+                Write-Host "$isProxyEnabled"
+                if ($isProxyEnabled -match "isProxyEnabled: true") {
+                    break
+                }
+            }
+            Start-Sleep -Seconds 10
+            $n += 1
+        } while ($n -le $MAX_RETRY_ATTEMPTS)
+        $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS
+    }
+
+    It 'Disable proxy' {
+        az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --disable-proxy
+        $? | Should -BeTrue
+        Start-Sleep -Seconds 10
+
+        # Loop and retry until helm reports the proxy as disabled
+        $n = 0
+        do
+        {
+            $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+            $provisioningState = ($output | ConvertFrom-Json).provisioningState
+            Write-Host "Provisioning State: $provisioningState"
+            if ($provisioningState -eq $SUCCEEDED) {
+                $isProxyEnabled = helm get values -n azure-arc-release azure-arc -o yaml | grep isProxyEnabled
+                Write-Host "$isProxyEnabled"
+                if ($isProxyEnabled -match "isProxyEnabled: false") {
+                    break
+                }
+            }
+            Start-Sleep -Seconds 10
+            $n += 1
+        } while ($n -le $MAX_RETRY_ATTEMPTS)
+        $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS
+    }
+
+    It "Delete the connected instance" {
+        az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y
+        $? | Should -BeTrue
+
+        # Configuration should be removed from the resource model
+        az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+        $? | Should -BeFalse
+    }
+}
\ No newline at end of file
diff --git a/testing/test/configurations/Troubleshoot.Tests.ps1 b/testing/test/configurations/Troubleshoot.Tests.ps1
new file mode 100644
index 00000000000..c9cb4e26010
--- /dev/null
+++ b/testing/test/configurations/Troubleshoot.Tests.ps1
@@ -0,0 +1,40 @@
+Describe 'Troubleshoot Scenario' {
+    BeforeAll {
+        . $PSScriptRoot/../helper/Constants.ps1
+    }
+
+    It 'Verify cluster onboarding process' {
+        az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --no-wait
+        $? | Should -BeTrue
+        Start-Sleep -Seconds 10
+
+        # Loop and retry until the configuration installs
+        $n = 0
+        do
+        {
+            $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+            $provisioningState = ($output | ConvertFrom-Json).provisioningState
+            Write-Host "Provisioning State: $provisioningState"
+            if ($provisioningState -eq $SUCCEEDED) {
+                break
+            }
+            Start-Sleep -Seconds 10
+            $n += 1
+        } while ($n -le $MAX_RETRY_ATTEMPTS)
+        $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS
+    }
+
+    It 'Verify troubleshoot command functionality' {
+        az connectedk8s troubleshoot -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup
+        $? | Should -BeTrue
+    }
+
+    It "Delete the connected instance" {
+        az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y
+        $?
| Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/configurations/WorkloadIdentity.Tests.ps1 b/testing/test/configurations/WorkloadIdentity.Tests.ps1 new file mode 100644 index 00000000000..c728b6a5236 --- /dev/null +++ b/testing/test/configurations/WorkloadIdentity.Tests.ps1 @@ -0,0 +1,239 @@ +Describe 'Onboarding with Workload Identity Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Check if onboarding works with oidc and workload identity enabled' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --enable-oidc-issuer --enable-workload-identity --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $securityProfile = $jsonOutput.RootElement.GetProperty("securityProfile").GetProperty("workloadIdentity").GetProperty("enabled").GetBoolean() + $oidcIssuerProfile = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("enabled").GetBoolean() + $issuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("issuerUrl").GetString() + $selfHostedIssuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("selfHostedIssuerUrl").GetString() + $agentState = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentState").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Security Profile Status: $securityProfile" + Write-Host "OIDC Issuer Profile Status: $oidcIssuerProfile" + Write-Host "Issuer Url: $issuerUrl" + Write-Host "Self Hosted Issuer Url: $selfHostedIssuerUrl" + Write-Host "Agent State: $agentState" + if ( + $provisioningState -eq $SUCCEEDED -and + $securityProfile -eq $true -and + $oidcIssuerProfile -eq $true -and + ![string]::IsNullOrEmpty($issuerUrl) -and + $issuerUrl -like "*unitedkingdom*" -and + [string]::IsNullOrEmpty($selfHostedIssuerUrl) -and + $agentState -eq $SUCCEEDED + ) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Disable workload identity' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --disable-workload-identity + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $securityProfile = $jsonOutput.RootElement.GetProperty("securityProfile").GetProperty("workloadIdentity").GetProperty("enabled").GetBoolean() + $agentState = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentState").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Security Profile Status: $securityProfile" + Write-Host "Agent State: $agentState" + if ($provisioningState -eq $SUCCEEDED -and $securityProfile -eq $false -and $agentState -eq $SUCCEEDED) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Update the cluster to use workload identity again using update cmd' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --enable-workload-identity + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $securityProfile = $jsonOutput.RootElement.GetProperty("securityProfile").GetProperty("workloadIdentity").GetProperty("enabled").GetBoolean() + $agentState = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentState").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Security Profile Status: $securityProfile" + Write-Host "Agent State: $agentState" + if ( + $provisioningState -eq $SUCCEEDED -and + $securityProfile -eq $true -and + $agentState -eq $SUCCEEDED + ) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + Start-Sleep -Seconds 10 + } +} + +Describe 'Updating with Workload Identity Scenario' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + } + + It 'Onboard a cluster to arc' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --no-wait + $? | Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $provisioningState = ($output | ConvertFrom-Json).provisioningState + Write-Host "Provisioning State: $provisioningState" + if ($provisioningState -eq $SUCCEEDED) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It 'Update the cluster with oidc and workload identity enabled' { + az connectedk8s update -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --enable-oidc-issuer --enable-workload-identity + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $securityProfile = $jsonOutput.RootElement.GetProperty("securityProfile").GetProperty("workloadIdentity").GetProperty("enabled").GetBoolean() + $oidcIssuerProfile = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("enabled").GetBoolean() + $issuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("issuerUrl").GetString() + $selfHostedIssuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("selfHostedIssuerUrl").GetString() + $agentState = $jsonOutput.RootElement.GetProperty("arcAgentProfile").GetProperty("agentState").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "Security Profile Status: $securityProfile" + Write-Host "OIDC Issuer Profile Status: $oidcIssuerProfile" + Write-Host "Issuer Url: $issuerUrl" + Write-Host "Self Hosted Issuer Url: $selfHostedIssuerUrl" + Write-Host "Agent State: $agentState" + if ( + $provisioningState -eq $SUCCEEDED -and + $securityProfile -eq $true -and + $oidcIssuerProfile -eq $true -and + ![string]::IsNullOrEmpty($issuerUrl) -and + $issuerUrl -like "*unitedkingdom*" -and + [string]::IsNullOrEmpty($selfHostedIssuerUrl) -and + $agentState -eq $SUCCEEDED + ) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + Start-Sleep -Seconds 10 + } +} + +Describe 'Creating with Workload Identity Scenario and Self Hosted Issuer' { + BeforeAll { + . $PSScriptRoot/../helper/Constants.ps1 + + $SelfHostedIssuer = "https://eastus.oic.prod-aks.azure.com/fc50e82b-3761-4218-8691-d98bcgb146da/e6c4bf03-84d9-480c-a269-37a41c28c5cb/" + } + + It 'Check if onboarding works with oidc enabled and self-hosted issuer url passed in' { + az connectedk8s connect -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup -l $ARC_LOCATION --enable-oidc-issuer --self-hosted-issuer $SelfHostedIssuer --no-wait + $? 
| Should -BeTrue + Start-Sleep -Seconds 10 + + # Loop and retry until the configuration installs + $n = 0 + do + { + $output = az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $jsonOutput = [System.Text.Json.JsonDocument]::Parse($output) + $provisioningState = ($output | ConvertFrom-Json).provisioningState + $oidcIssuerProfile = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("enabled").GetBoolean() + $issuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("issuerUrl").GetString() + $selfHostedIssuerUrl = $jsonOutput.RootElement.GetProperty("oidcIssuerProfile").GetProperty("selfHostedIssuerUrl").GetString() + Write-Host "Provisioning State: $provisioningState" + Write-Host "OIDC Issuer Profile Status: $oidcIssuerProfile" + Write-Host "Issuer Url: $issuerUrl" + Write-Host "Self Hosted Issuer Url: $selfHostedIssuerUrl" + if ( + $provisioningState -eq $SUCCEEDED -and + $oidcIssuerProfile -eq $true -and + [string]::IsNullOrEmpty($issuerUrl) -and + ![string]::IsNullOrEmpty($selfHostedIssuerUrl) -and + $selfHostedIssuerUrl -eq $SelfHostedIssuer + ) { + break + } + Start-Sleep -Seconds 10 + $n += 1 + } while ($n -le $MAX_RETRY_ATTEMPTS) + $n | Should -BeLessOrEqual $MAX_RETRY_ATTEMPTS + } + + It "Delete the connected instance" { + az connectedk8s delete -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup --force -y + $? | Should -BeTrue + + # Configuration should be removed from the resource model + az connectedk8s show -n $ENVCONFIG.arcClusterName -g $ENVCONFIG.resourceGroup + $? | Should -BeFalse + } +} \ No newline at end of file diff --git a/testing/test/helper/Constants.ps1 b/testing/test/helper/Constants.ps1 new file mode 100644 index 00000000000..43006f78a69 --- /dev/null +++ b/testing/test/helper/Constants.ps1 @@ -0,0 +1,5 @@ +$ENVCONFIG = Get-Content -Path $PSScriptRoot/../../settings.json | ConvertFrom-Json + +$MAX_RETRY_ATTEMPTS = 30 +$ARC_LOCATION = "uksouth" +$SUCCEEDED = "Succeeded" \ No newline at end of file