diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 53bcda971dc..71bebf7f125 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -88,7 +88,7 @@
 /src/ip-group/ @necusjz @kairu-ms @jsntcy
-/src/connectedk8s/ @bavneetsingh16 @deeksha345 @anagg929
+/src/connectedk8s/ @bavneetsingh16 @deeksha345 @anagg929 @atchutbarli
 /src/storagesync/ @jsntcy
diff --git a/src/connectedk8s/HISTORY.rst b/src/connectedk8s/HISTORY.rst
index d44cb3899fb..28c23e6b0f7 100644
--- a/src/connectedk8s/HISTORY.rst
+++ b/src/connectedk8s/HISTORY.rst
@@ -2,6 +2,11 @@
 Release History
 ===============
 
+1.10.8
+++++++
+* Added a force delete parameter to the ``connectedk8s delete`` command to allow force deletion of the connected cluster ARM resource.
+* Parameterized the MCR URL for air-gapped clouds.
+* Updated error strings for kubeconfig and connectivity errors in the connect command.
+
 1.10.7
 ++++++
diff --git a/src/connectedk8s/azext_connectedk8s/_constants.py b/src/connectedk8s/azext_connectedk8s/_constants.py
index 4f173c668a2..61442db76b8 100644
--- a/src/connectedk8s/azext_connectedk8s/_constants.py
+++ b/src/connectedk8s/azext_connectedk8s/_constants.py
@@ -63,13 +63,13 @@
 AHB_Enum_Values = ["True", "False", "NotApplicable"]
 Feature_Values = ["cluster-connect", "azure-rbac", "custom-locations"]
 CRD_FOR_FORCE_DELETE = [
-    "arccertificates.clusterconfig.azure.com",
-    "azureclusteridentityrequests.clusterconfig.azure.com",
-    "azureextensionidentities.clusterconfig.azure.com",
-    "connectedclusters.arc.azure.com",
-    "customlocationsettings.clusterconfig.azure.com",
-    "extensionconfigs.clusterconfig.azure.com",
-    "gitconfigs.clusterconfig.azure.com",
+    "arccertificates.clusterconfig.azure",
+    "azureclusteridentityrequests.clusterconfig.azure",
+    "azureextensionidentities.clusterconfig.azure",
+    "connectedclusters.arc.azure",
+    "customlocationsettings.clusterconfig.azure",
+    "extensionconfigs.clusterconfig.azure",
+    "gitconfigs.clusterconfig.azure",
 ]
 Helm_Install_Release_Userfault_Messages = [
     "forbidden",
@@ -418,7 +418,7 @@
 # Connect Precheck Diagnoser constants
 Cluster_Diagnostic_Checks_Job_Registry_Path = (
-    "mcr.microsoft.com/azurearck8s/helmchart/stable/clusterdiagnosticchecks:0.2.2"
+    "azurearck8s/helmchart/stable/clusterdiagnosticchecks:0.2.2"
 )
 Cluster_Diagnostic_Checks_Helm_Install_Failed_Fault_Type = (
     "Error while installing cluster diagnostic checks helm release"
@@ -460,6 +460,14 @@
 Custom_Location_Enable_Failed_warning = """Important! Custom Location feature wasn't enabled due to insufficient privileges on the Service Principal Name.
 If the custom location feature is not enabled, you will encounter an error when creating the custom location.
 Refer to: https://aka.ms/enable-cl-spn"""
+
+KubeApi_Connectivity_Failed_Warning = """Unable to verify connectivity to the Kubernetes cluster.
+Please check https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/diagnose-connection-issues"""
+
+Kubeconfig_Load_Failed_Warning = """Unable to load the kubeconfig file.
+Please check https://learn.microsoft.com/en-us/azure/azure-arc/kubernetes/diagnose-connection-issues#is-kubeconfig-pointing-to-the-right-cluster"""
+
+Cluster_Already_Onboarded_Error = """The Kubernetes cluster is already onboarded.
+Please check if the kubeconfig is pointing to the correct cluster using the command: kubectl config current-context."""
 
 # Diagnostic Results Name
 Outbound_Connectivity_Check_Result_String = "Outbound Network Connectivity"
@@ -481,8 +489,8 @@
 DEFAULT_MAX_ONBOARDING_TIMEOUT_HELMVALUE_SECONDS = "1200"
 
 # URL constants
-CLIENT_PROXY_MCR_TARGET = "mcr.microsoft.com/azureconnectivity/proxy"
-HELM_MCR_URL = "mcr.microsoft.com/azurearck8s/helm"
+CLIENT_PROXY_MCR_TARGET = "azureconnectivity/proxy"
+HELM_MCR_URL = "azurearck8s/helm"
 HELM_VERSION = "v3.12.2"
 Download_And_Install_Kubectl_Fault_Type = "Failed to download and install kubectl"
 Azure_Access_Token_Variable = "AZURE_ACCESS_TOKEN"
diff --git a/src/connectedk8s/azext_connectedk8s/_precheckutils.py b/src/connectedk8s/azext_connectedk8s/_precheckutils.py
index 257fde55463..b10c9a22241 100644
--- a/src/connectedk8s/azext_connectedk8s/_precheckutils.py
+++ b/src/connectedk8s/azext_connectedk8s/_precheckutils.py
@@ -20,6 +20,7 @@
 import azext_connectedk8s._utils as azext_utils
 
 if TYPE_CHECKING:
+    from knack.commands import CLICommand
     from kubernetes.client import BatchV1Api, CoreV1Api
 
 logger = get_logger(__name__)
@@ -30,6 +31,7 @@
 
 
 def fetch_diagnostic_checks_results(
+    cmd: CLICommand,
     corev1_api_instance: CoreV1Api,
     batchv1_api_instance: BatchV1Api,
     helm_client_location: str,
@@ -52,6 +54,7 @@ def fetch_diagnostic_checks_results(
     # Executing the cluster_diagnostic_checks job and fetching the logs obtained
     cluster_diagnostic_checks_container_log = (
         executing_cluster_diagnostic_checks_job(
+            cmd,
             corev1_api_instance,
             batchv1_api_instance,
             helm_client_location,
@@ -135,6 +138,7 @@ def fetch_diagnostic_checks_results(
 
 
 def executing_cluster_diagnostic_checks_job(
+    cmd: CLICommand,
     corev1_api_instance: CoreV1Api,
     batchv1_api_instance: BatchV1Api,
     helm_client_location: str,
@@ -208,8 +212,10 @@ def executing_cluster_diagnostic_checks_job(
         )
         return None
 
+    mcr_url = azext_utils.get_mcr_path(cmd)
+
     chart_path = azext_utils.get_chart_path(
-        consts.Cluster_Diagnostic_Checks_Job_Registry_Path,
+        f"{mcr_url}/{consts.Cluster_Diagnostic_Checks_Job_Registry_Path}",
         kube_config,
         kube_context,
         helm_client_location,
diff --git a/src/connectedk8s/azext_connectedk8s/_utils.py b/src/connectedk8s/azext_connectedk8s/_utils.py
index 55396ebcb11..3829e56a3c1 100644
--- a/src/connectedk8s/azext_connectedk8s/_utils.py
+++ b/src/connectedk8s/azext_connectedk8s/_utils.py
@@ -58,6 +58,28 @@
 # pylint: disable=bare-except
 
 
+def get_mcr_path(cmd: CLICommand) -> str:
+    active_directory_array = cmd.cli_ctx.cloud.endpoints.active_directory.split(".")
+
+    # default for public, mc, ff clouds
+    mcr_postfix = active_directory_array[2]
+    # special case for USSec: exclude part of the suffix
+    if len(active_directory_array) == 4 and active_directory_array[2] == "microsoft":
+        mcr_postfix = active_directory_array[3]
+    # special case for USNat
+    elif len(active_directory_array) == 5:
+        mcr_postfix = (
+            active_directory_array[2]
+            + "."
+            + active_directory_array[3]
+            + "."
+            + active_directory_array[4]
+        )
+
+    mcr_url = f"mcr.microsoft.{mcr_postfix}"
+    return mcr_url
+
+
 def validate_connect_rp_location(cmd: CLICommand, location: str) -> None:
     subscription_id = (
         os.getenv("AZURE_SUBSCRIPTION_ID")
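For reviewers: the suffix handling in `get_mcr_path` is easiest to verify against concrete endpoints. The sketch below reimplements the same mapping standalone; the sovereign-cloud hostnames are illustrative examples, not values taken from this diff:

```python
# Standalone sketch of the mapping implemented by get_mcr_path().
def _mcr_host_for(active_directory_endpoint: str) -> str:
    parts = active_directory_endpoint.split(".")
    postfix = parts[2]  # default: public ("com"), mc ("cn"), ff ("us")
    if len(parts) == 4 and parts[2] == "microsoft":  # USSec-style suffix
        postfix = parts[3]
    elif len(parts) == 5:  # USNat-style suffix
        postfix = ".".join(parts[2:5])
    return f"mcr.microsoft.{postfix}"


# Illustrative endpoint values (assumed, not part of the diff):
assert _mcr_host_for("https://login.microsoftonline.com") == "mcr.microsoft.com"
assert _mcr_host_for("https://login.microsoftonline.us") == "mcr.microsoft.us"
assert _mcr_host_for("https://login.microsoftonline.microsoft.scloud") == "mcr.microsoft.scloud"
assert _mcr_host_for("https://login.microsoftonline.eaglex.ic.gov") == "mcr.microsoft.eaglex.ic.gov"
```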
@@ -1332,6 +1354,7 @@ def helm_install_release(
         "Please check if the azure-arc namespace was deployed and run 'kubectl get pods -n azure-arc' "
         "to check if all the pods are in running state. A possible cause for pods stuck in pending "
         "state could be insufficient resources on the kubernetes cluster to onboard to arc."
+        " Pod logs can also be checked using 'kubectl logs -n azure-arc'.\n"
     )
     logger.warning(warn_msg)
     raise CLIInternalError(
diff --git a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py
index 56f7b218b7e..c655b4269de 100644
--- a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py
+++ b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_binaryutils.py
@@ -13,16 +13,18 @@
 from azure.cli.core import azclierror, telemetry
 from azure.cli.core.style import Style, print_styled_text
 from knack import log
+from knack.commands import CLICommand
 
 import azext_connectedk8s._constants as consts
 import azext_connectedk8s._fileutils as file_utils
+import azext_connectedk8s._utils as utils
 
 logger = log.get_logger(__name__)
 
 
 # Downloads client side proxy to connect to Arc Connectivity Platform
 def install_client_side_proxy(
-    arc_proxy_folder: Optional[str], debug: bool = False
+    cmd: CLICommand, arc_proxy_folder: Optional[str], debug: bool = False
 ) -> str:
     client_operating_system = _get_client_operating_system()
     client_architecture = _get_client_architeture()
@@ -48,7 +50,11 @@ def install_client_side_proxy(
     )
 
     _download_proxy_from_MCR(
-        install_dir, proxy_name, client_operating_system, client_architecture
+        cmd,
+        install_dir,
+        proxy_name,
+        client_operating_system,
+        client_architecture,
     )
     _check_proxy_installation(install_dir, proxy_name, debug)
 
@@ -64,15 +70,21 @@
 def _download_proxy_from_MCR(
-    dest_dir: str, proxy_name: str, operating_system: str, architecture: str
+    cmd: CLICommand,
+    dest_dir: str,
+    proxy_name: str,
+    operating_system: str,
+    architecture: str,
 ) -> None:
-    mar_target = f"{consts.CLIENT_PROXY_MCR_TARGET}/{operating_system.lower()}/{architecture}/arc-proxy"
+    mcr_url = utils.get_mcr_path(cmd)
+
+    mar_target = f"{mcr_url}/{consts.CLIENT_PROXY_MCR_TARGET}/{operating_system.lower()}/{architecture}/arc-proxy"
     logger.debug(
         "Downloading Arc Connectivity Proxy from %s in Microsoft Artifact Regristy.",
         mar_target,
     )
 
-    client = oras.client.OrasClient()
+    client = oras.client.OrasClient(hostname=mcr_url)
     t0 = time.time()
     try:
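With `CLIENT_PROXY_MCR_TARGET` now registry-relative, the registry host and the repository path travel separately and are recombined at the call site. A minimal sketch of the composition, with the hostname hard-coded for illustration (the extension derives it from `utils.get_mcr_path(cmd)`):

```python
import oras.client

import azext_connectedk8s._constants as consts

mcr_url = "mcr.microsoft.com"  # illustration only; get_mcr_path(cmd) in the extension

# Registry-relative constant + host => full artifact path, e.g.
# "mcr.microsoft.com/azureconnectivity/proxy/linux/amd64/arc-proxy"
mar_target = f"{mcr_url}/{consts.CLIENT_PROXY_MCR_TARGET}/linux/amd64/arc-proxy"

# Pointing the ORAS client at the same host keeps the registry connection and
# the artifact reference consistent; the pull itself is elided in the hunk above.
client = oras.client.OrasClient(hostname=mcr_url)
```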
diff --git a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py
index 71345064af6..f72074d1b6e 100644
--- a/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py
+++ b/src/connectedk8s/azext_connectedk8s/clientproxyhelper/_proxylogic.py
@@ -18,7 +18,7 @@
 if TYPE_CHECKING:
     from subprocess import Popen
 
-    from knack.commands import CLICommmand
+    from knack.commands import CLICommand
     from requests.models import Response
 
     from azext_connectedk8s.vendored_sdks.preview_2024_07_01.models import (
@@ -30,7 +30,7 @@
 def handle_post_at_to_csp(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     api_server_port: int,
     tenant_id: str,
     clientproxy_process: Popen[bytes],
diff --git a/src/connectedk8s/azext_connectedk8s/custom.py b/src/connectedk8s/azext_connectedk8s/custom.py
index 8f481f1111b..cf6a843cffb 100644
--- a/src/connectedk8s/azext_connectedk8s/custom.py
+++ b/src/connectedk8s/azext_connectedk8s/custom.py
@@ -81,7 +81,7 @@
     from azure.cli.core.commands import AzCliCommand
     from azure.core.polling import LROPoller
     from Crypto.PublicKey.RSA import RsaKey
-    from knack.commands import CLICommmand
+    from knack.commands import CLICommand
     from kubernetes.client import V1NodeList
     from kubernetes.config.kube_config import ConfigNode
     from requests.models import Response
@@ -99,7 +99,7 @@
 
 
 def create_connectedk8s(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
@@ -301,7 +301,7 @@ def create_connectedk8s(
     # Install kubectl and helm
     try:
         kubectl_client_location = install_kubectl_client()
-        helm_client_location = install_helm_client()
+        helm_client_location = install_helm_client(cmd)
     except Exception as e:
         raise CLIInternalError(
             f"An exception has occured while trying to perform kubectl or helm install: {e}"
@@ -344,6 +344,7 @@ def create_connectedk8s(
     # Performing cluster-diagnostic-checks
     diagnostic_checks, storage_space_available = (
         precheckutils.fetch_diagnostic_checks_results(
+            cmd,
             api_instance,
             batchv1_api_instance,
             helm_client_location,
@@ -620,6 +621,7 @@ def create_connectedk8s(
             "The kubernetes cluster you are trying to onboard is already onboarded to "
             f"the resource group '{configmap_rg_name}' with resource name '{configmap_cluster_name}'."
         )
+        logger.warning(consts.Cluster_Already_Onboarded_Error)
         raise ArgumentUsageError(err_msg)
 
     # Re-put connected cluster
@@ -742,7 +744,9 @@ def create_connectedk8s(
         "Cleaning up the stale arc agents present on the cluster before starting new onboarding."
     )
     # Explicit CRD Deletion
-    crd_cleanup_force_delete(kubectl_client_location, kube_config, kube_context)
+    crd_cleanup_force_delete(
+        cmd, kubectl_client_location, kube_config, kube_context
+    )
     # Cleaning up the cluster
     utils.delete_arc_agents(
         release_namespace,
@@ -773,7 +777,9 @@ def create_connectedk8s(
         raise ArgumentUsageError(err_msg, recommendation=reco_msg)
 
     # cleanup of stuck CRD if release namespace is not present/deleted
-    crd_cleanup_force_delete(kubectl_client_location, kube_config, kube_context)
+    crd_cleanup_force_delete(
+        cmd, kubectl_client_location, kube_config, kube_context
+    )
 
     print(
         f"Step: {utils.get_utctimestring()}: Check if ResourceGroup exists. Try to create if it doesn't"
@@ -1043,7 +1049,7 @@ def validate_existing_provisioned_cluster_for_reput(
         raise InvalidArgumentValueError(err_msg)
 
 
-def send_cloud_telemetry(cmd: CLICommmand) -> str:
+def send_cloud_telemetry(cmd: CLICommand) -> str:
     telemetry.add_extension_event(
         "connectedk8s", {"Context.Default.AzureCLI.AzureCloud": cmd.cli_ctx.cloud.name}
     )
@@ -1143,7 +1149,7 @@ def check_kube_connection() -> str:
         git_version: str = api_response.git_version
         return git_version
     except Exception as e:  # pylint: disable=broad-except
-        logger.warning("Unable to verify connectivity to the Kubernetes cluster.")
+        logger.warning(consts.KubeApi_Connectivity_Failed_Warning)
         utils.kubernetes_exception_handler(
             e,
             consts.Kubernetes_Connectivity_FaultType,
@@ -1153,7 +1159,7 @@
     assert False
 
 
-def install_helm_client() -> str:
+def install_helm_client(cmd: CLICommand) -> str:
     print(
         f"Step: {utils.get_utctimestring()}: Install Helm client if it does not exist"
     )
@@ -1219,13 +1225,16 @@
         logger.warning(
             "Downloading helm client for first time. This can take few minutes..."
         )
-        client = oras.client.OrasClient()
+
+        mcr_url = utils.get_mcr_path(cmd)
+
+        client = oras.client.OrasClient(hostname=mcr_url)
         retry_count = 3
         retry_delay = 5
         for i in range(retry_count):
             try:
                 client.pull(
-                    target=f"{consts.HELM_MCR_URL}:{artifactTag}",
+                    target=f"{mcr_url}/{consts.HELM_MCR_URL}:{artifactTag}",
                     outdir=download_location,
                 )
                 break
@@ -1289,8 +1298,22 @@ def connected_cluster_exists(
     return True
 
 
-def get_default_config_dp_endpoint(cmd: CLICommmand, location: str) -> str:
-    cloud_based_domain = cmd.cli_ctx.cloud.endpoints.active_directory.split(".")[2]
+def get_default_config_dp_endpoint(cmd: CLICommand, location: str) -> str:
+    active_directory_array = cmd.cli_ctx.cloud.endpoints.active_directory.split(".")
+    # default for public, mc, ff clouds
+    cloud_based_domain = active_directory_array[2]
+    # special cases for USSec/USNat clouds
+    if len(active_directory_array) == 4:
+        cloud_based_domain = active_directory_array[2] + "." + active_directory_array[3]
+    elif len(active_directory_array) == 5:
+        cloud_based_domain = (
+            active_directory_array[2]
+            + "."
+            + active_directory_array[3]
+            + "."
+            + active_directory_array[4]
+        )
+
     config_dp_endpoint = (
         f"https://{location}.dp.kubernetesconfiguration.azure.{cloud_based_domain}"
     )
@@ -1298,7 +1321,7 @@
 
 
 def get_config_dp_endpoint(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     location: str,
     values_file: str | None,
     arm_metadata: dict[str, Any] | None = None,
@@ -1348,6 +1371,7 @@ def load_kube_config(
             fault_type=consts.Load_Kubeconfig_Fault_Type,
             summary="Problem loading the kubeconfig file",
         )
+        logger.warning(consts.Kubeconfig_Load_Failed_Warning)
         raise FileOperationError("Problem loading the kubeconfig file. " + str(e))
 
@@ -1733,7 +1757,7 @@ def list_connectedk8s(
 
 
 def delete_connectedk8s(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
@@ -1785,7 +1809,7 @@ def delete_connectedk8s(
     check_kube_connection()
 
     # Install helm client
-    helm_client_location = install_helm_client()
+    helm_client_location = install_helm_client(cmd)
 
     # Check Release Existance
     release_namespace = utils.get_release_namespace(
@@ -1802,10 +1826,14 @@ def delete_connectedk8s(
         print(f"Step: {utils.get_utctimestring()}: Performing Force Delete")
         kubectl_client_location = install_kubectl_client()
 
-        delete_cc_resource(client, resource_group_name, cluster_name, no_wait).result()
+        delete_cc_resource(
+            client, resource_group_name, cluster_name, no_wait, force=force_delete
+        ).result()
 
         # Explicit CRD Deletion
-        crd_cleanup_force_delete(kubectl_client_location, kube_config, kube_context)
+        crd_cleanup_force_delete(
+            cmd, kubectl_client_location, kube_config, kube_context
+        )
 
         if release_namespace:
             utils.delete_arc_agents(
@@ -1820,7 +1848,9 @@ def delete_connectedk8s(
         return
 
     if not release_namespace:
-        delete_cc_resource(client, resource_group_name, cluster_name, no_wait).result()
+        delete_cc_resource(
+            client, resource_group_name, cluster_name, no_wait, force=force_delete
+        ).result()
         return
 
     # Loading config map
@@ -1871,7 +1901,9 @@ def delete_connectedk8s(
             recommendation=reco_str,
         )
 
-        delete_cc_resource(client, resource_group_name, cluster_name, no_wait).result()
+        delete_cc_resource(
+            client, resource_group_name, cluster_name, no_wait, force=force_delete
+        ).result()
     else:
         telemetry.set_exception(
             exception="Unable to delete connected cluster",
@@ -1952,15 +1984,26 @@ def delete_cc_resource(
     resource_group_name: str,
     cluster_name: str,
     no_wait: bool,
+    force: bool = False,
 ) -> LROPoller[None]:
     print(f"Step: {utils.get_utctimestring()}: Deleting ARM resource")
     try:
-        poller: LROPoller[None] = sdk_no_wait(
-            no_wait,
-            client.begin_delete,
-            resource_group_name=resource_group_name,
-            cluster_name=cluster_name,
-        )
+        poller: LROPoller[None]
+        if force:
+            poller = sdk_no_wait(
+                no_wait,
+                client.begin_delete,
+                resource_group_name=resource_group_name,
+                cluster_name=cluster_name,
+                params={"force": True},
+            )
+        else:
+            poller = sdk_no_wait(
+                no_wait,
+                client.begin_delete,
+                resource_group_name=resource_group_name,
+                cluster_name=cluster_name,
+            )
         return poller
     except Exception as e:
         utils.arm_exception_handler(
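The two `sdk_no_wait` branches above differ only in the extra `params={"force": True}` keyword, which is assumed here (as in the diff) to be forwarded by the vendored SDK onto the delete request as an additional query parameter. A possible tightening that avoids duplicating the call, offered as a sketch rather than part of this diff:

```python
from typing import Any

from azure.cli.core.util import sdk_no_wait


def delete_cc_resource_sketch(
    client: Any,
    resource_group_name: str,
    cluster_name: str,
    no_wait: bool,
    force: bool = False,
):
    # Only add the 'force' query parameter when requested; otherwise the
    # call matches the previous behavior exactly.
    extra_kwargs: dict[str, Any] = {"params": {"force": True}} if force else {}
    return sdk_no_wait(
        no_wait,
        client.begin_delete,
        resource_group_name=resource_group_name,
        cluster_name=cluster_name,
        **extra_kwargs,
    )
```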
@@ -1995,7 +2038,7 @@ def update_connected_cluster_internal(
 
 
 def update_connected_cluster(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
@@ -2158,7 +2201,7 @@ def update_connected_cluster(
     kubernetes_version = check_kube_connection()
 
     # Install helm client
-    helm_client_location = install_helm_client()
+    helm_client_location = install_helm_client(cmd)
 
     release_namespace = validate_release_namespace(
         client,
@@ -2351,7 +2394,7 @@ def update_connected_cluster(
 
 
 def upgrade_agents(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
@@ -2396,7 +2439,7 @@ def upgrade_agents(
     api_instance = kube_client.CoreV1Api()
 
     # Install helm client
-    helm_client_location = install_helm_client()
+    helm_client_location = install_helm_client(cmd)
 
     # Check Release Existence
     release_namespace = utils.get_release_namespace(
@@ -2797,7 +2840,7 @@ def get_all_helm_values(
 
 
 def enable_features(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
@@ -2892,7 +2935,7 @@ def enable_features(
     kubernetes_version = check_kube_connection()
 
     # Install helm client
-    helm_client_location = install_helm_client()
+    helm_client_location = install_helm_client(cmd)
 
     release_namespace = validate_release_namespace(
         client,
@@ -3030,7 +3073,7 @@ def enable_features(
 
 
 def disable_features(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
@@ -3085,7 +3128,7 @@ def disable_features(
     kubernetes_version = check_kube_connection()
 
     # Install helm client
-    helm_client_location = install_helm_client()
+    helm_client_location = install_helm_client(cmd)
 
     release_namespace = validate_release_namespace(
         client,
@@ -3169,7 +3212,7 @@ def disable_features(
 
 
 def get_chart_and_disable_features(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     connected_cluster: ConnectedCluster,
     kube_config: str | None,
     kube_context: str | None,
@@ -3260,7 +3303,7 @@ def get_chart_and_disable_features(
 
 
 def disable_cluster_connect(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
@@ -3467,7 +3510,7 @@ def handle_merge(
 
 
 def client_side_proxy_wrapper(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
@@ -3535,7 +3578,7 @@ def client_side_proxy_wrapper(
     if "--debug" in cmd.cli_ctx.data["safe_params"]:
         debug_mode = True
 
-    install_location = proxybinaryutils.install_client_side_proxy(None, debug_mode)
+    install_location = proxybinaryutils.install_client_side_proxy(cmd, None, debug_mode)
     args.append(install_location)
     install_dir = os.path.dirname(install_location)
@@ -3638,7 +3681,7 @@ def client_side_proxy_wrapper(
 
 
 def client_side_proxy_main(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     tenant_id: str,
     client: ConnectedClusterOperations,
     resource_group_name: str,
@@ -3709,7 +3752,7 @@ def client_side_proxy_main(
 
 
 def client_side_proxy(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     tenant_id: str,
     client: ConnectedClusterOperations,
     resource_group_name: str,
@@ -3842,7 +3885,7 @@ def client_side_proxy(
 
 
 def check_cl_registration_and_get_oid(
-    cmd: CLICommmand, cl_oid: str | None, subscription_id: str | None
+    cmd: CLICommand, cl_oid: str | None, subscription_id: str | None
 ) -> tuple[bool, str]:
     print(
         f"Step: {utils.get_utctimestring()}: Checking Custom Location(Microsoft.ExtendedLocation) RP Registration state for this Subscription, and attempt to get the Custom Location Object ID (OID),if registered"
@@ -3881,7 +3924,7 @@ def check_cl_registration_and_get_oid(
     return enable_custom_locations, custom_locations_oid
 
 
-def get_custom_locations_oid(cmd: CLICommmand, cl_oid: str | None) -> str:
+def get_custom_locations_oid(cmd: CLICommand, cl_oid: str | None) -> str:
     try:
         graph_client = graph_client_factory(cmd.cli_ctx)
         app_id = "bc313c14-388c-4e7d-a58e-70017303ee3b"
@@ -3942,7 +3985,7 @@ def get_custom_locations_oid(cmd: CLICommmand, cl_oid: str | None) -> str:
 
 
 def troubleshoot(
-    cmd: CLICommmand,
+    cmd: CLICommand,
     client: ConnectedClusterOperations,
     resource_group_name: str,
     cluster_name: str,
@@ -3985,7 +4028,7 @@ def troubleshoot(
         load_kube_config(kube_config, kube_context, skip_ssl_verification)
 
         # Install helm client
-        helm_client_location = install_helm_client()
+        helm_client_location = install_helm_client(cmd)
 
         # Install kubectl client
         kubectl_client_location = install_kubectl_client()
@@ -4392,16 +4435,36 @@ def install_kubectl_client() -> str:
 
 
 def crd_cleanup_force_delete(
-    kubectl_client_location: str, kube_config: str | None, kube_context: str | None
+    cmd: CLICommand,
+    kubectl_client_location: str,
+    kube_config: str | None,
+    kube_context: str | None,
 ) -> None:
     print(f"Step: {utils.get_utctimestring()}: Deleting Arc CRDs")
+
+    active_directory_array = cmd.cli_ctx.cloud.endpoints.active_directory.split(".")
+    # default for public, mc, ff clouds
+    cloud_based_domain = active_directory_array[2]
+    # special cases for USSec/USNat clouds
+    if len(active_directory_array) == 4:
+        cloud_based_domain = active_directory_array[2] + "." + active_directory_array[3]
+    elif len(active_directory_array) == 5:
+        cloud_based_domain = (
+            active_directory_array[2]
+            + "."
+            + active_directory_array[3]
+            + "."
+            + active_directory_array[4]
+        )
+
     timeout_for_crd_deletion = "20s"
     for crds in consts.CRD_FOR_FORCE_DELETE:
+        full_crds = f"{crds}.{cloud_based_domain}"
         cmd_helm_delete = [
             kubectl_client_location,
             "delete",
             "crds",
-            crds,
+            full_crds,
             "--ignore-not-found",
             "--wait",
             "--timeout",
@@ -4424,7 +4487,8 @@ def crd_cleanup_force_delete(
 
     # Patch if CRD is in Terminating state
     for crds in consts.CRD_FOR_FORCE_DELETE:
-        cmd = [kubectl_client_location, "get", "crd", crds, "-ojson"]
+        full_crds = f"{crds}.{cloud_based_domain}"
+        cmd = [kubectl_client_location, "get", "crd", full_crds, "-ojson"]
         if kube_config:
             cmd.extend(["--kubeconfig", kube_config])
         if kube_context:
@@ -4441,7 +4505,7 @@ def crd_cleanup_force_delete(
                 kubectl_client_location,
                 "patch",
                 "crd",
-                crds,
+                full_crds,
                 "--type=merge",
                 "--patch-file",
                 yaml_file_path,
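The endpoint parsing added to `crd_cleanup_force_delete` is the same block that now lives in `get_default_config_dp_endpoint` (with a near-variant in `get_mcr_path`, which keeps only the last label for USSec). Note also that the loops above reassign `cmd` to a kubectl argument list, shadowing the `CLICommand` parameter; this works only because the endpoint is read first. A shared helper along these lines, a sketch rather than part of this diff, would remove the duplication:

```python
from knack.commands import CLICommand


def get_cloud_based_domain(cmd: CLICommand) -> str:
    # Cloud-specific DNS suffix derived from the Active Directory endpoint,
    # mirroring the block duplicated in get_default_config_dp_endpoint and
    # crd_cleanup_force_delete.
    parts = cmd.cli_ctx.cloud.endpoints.active_directory.split(".")
    if len(parts) == 5:  # USNat-style endpoints
        return ".".join(parts[2:5])
    if len(parts) == 4:  # USSec-style endpoints
        return ".".join(parts[2:4])
    return parts[2]  # public, mc, ff clouds
```

`crd_cleanup_force_delete` could then compute `full_crds = f"{crds}.{get_cloud_based_domain(cmd)}"` up front, and renaming the local kubectl list (for example to `kubectl_cmd`) would avoid the shadowing.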
diff --git a/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py b/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py
index 9d899e786da..32b95b2f06c 100644
--- a/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py
+++ b/src/connectedk8s/azext_connectedk8s/tests/latest/test_connectedk8s_scenario.py
@@ -110,10 +110,11 @@ def install_helm_client():
     logger.warning(
         "Downloading helm client for first time. This can take few minutes..."
     )
-    client = oras.client.OrasClient()
+    client = oras.client.OrasClient(hostname="mcr.microsoft.com")
     try:
         client.pull(
-            target=f"{consts.HELM_MCR_URL}:{artifactTag}", outdir=download_location
+            target=f"mcr.microsoft.com/{consts.HELM_MCR_URL}:{artifactTag}",
+            outdir=download_location,
         )
     except Exception as e:
         logger.warning("Failed to download helm client." + str(e))
diff --git a/src/connectedk8s/setup.py b/src/connectedk8s/setup.py
index 7625c142670..4f9959412ad 100644
--- a/src/connectedk8s/setup.py
+++ b/src/connectedk8s/setup.py
@@ -13,7 +13,7 @@
 # TODO: Confirm this is the right version number you want and it matches your
 # HISTORY.rst entry.
-VERSION = "1.10.7"
+VERSION = "1.10.8"
 
 # The full list of classifiers is available at
 # https://pypi.python.org/pypi?%3Aaction=list_classifiers