def create_or_update_deployment(name, namespace, kube_client, deployment):
    """Create a Deployment, replacing it in place if it already exists.

    :param name: name of the Deployment (must be non-empty).
    :param namespace: target namespace (must be non-empty).
    :param kube_client: configured kubernetes API client.
    :param deployment: V1Deployment body to apply.
    :raises CLIError: on any API failure other than an already-exists conflict.
    """
    validate_resource_name_and_resource_namespace_not_empty(name, namespace)

    apps_v1_api = client.AppsV1Api(kube_client)
    try:
        logger.info(f"Start to create deployment {name} in namespace {namespace}")
        apps_v1_api.create_namespaced_deployment(namespace=namespace, body=deployment)
    except client.exceptions.ApiException as e:
        # 409 Conflict means the Deployment already exists; replace it to stay idempotent.
        if e.status == 409:
            logger.warning(f"Deployment '{name}' already exists, replacing it")
            apps_v1_api.replace_namespaced_deployment(name=name, namespace=namespace, body=deployment)
        else:
            # Fix: original message read "Deployment'{name}'" (missing space).
            raise CLIError(f"Failed to create or replace Deployment '{name}': {str(e)}")


def create_or_update_configmap(name, namespace, kube_client, configmap):
    """Create a ConfigMap, replacing it in place if it already exists.

    :param name: name of the ConfigMap (must be non-empty).
    :param namespace: target namespace (must be non-empty).
    :param kube_client: configured kubernetes API client.
    :param configmap: V1ConfigMap body to apply.
    :raises CLIError: on any API failure other than an already-exists conflict.
    """
    validate_resource_name_and_resource_namespace_not_empty(name, namespace)

    core_v1_api = client.CoreV1Api(kube_client)
    try:
        logger.info(f"Start to create configmap {name} in namespace {namespace}")
        core_v1_api.create_namespaced_config_map(namespace=namespace, body=configmap)
    except client.exceptions.ApiException as e:
        # 409 Conflict means the ConfigMap already exists; replace it to stay idempotent.
        if e.status == 409:
            logger.warning(f"Configmap '{name}' already exists, replacing it")
            core_v1_api.replace_namespaced_config_map(name=name, namespace=namespace, body=configmap)
        else:
            raise CLIError(f"Failed to create or replace ConfigMap '{name}': {str(e)}")


def create_or_replace_cluster_role(rbac_api, role_name, role):
    """Create a ClusterRole, replacing it if it already exists (HTTP 409).

    :param rbac_api: RbacAuthorizationV1Api instance.
    :param role_name: name of the ClusterRole.
    :param role: V1ClusterRole body to apply.
    :raises CLIError: on any API failure other than an already-exists conflict.
    """
    try:
        logger.info(f"Creating new ClusterRole '{role_name}'")
        rbac_api.create_cluster_role(body=role)
    except client.exceptions.ApiException as e:
        if e.status == 409:
            logger.info(f"ClusterRole '{role_name}' already exists, replacing it")
            rbac_api.replace_cluster_role(name=role_name, body=role)
        else:
            raise CLIError(f"Failed to create or replace ClusterRole '{role_name}': {str(e)}")


def create_or_replace_cluster_rolebinding(rbac_api, rolebinding_name, rolebinding):
    """Create a ClusterRoleBinding, replacing it if it already exists (HTTP 409).

    :param rbac_api: RbacAuthorizationV1Api instance.
    :param rolebinding_name: name of the ClusterRoleBinding.
    :param rolebinding: V1ClusterRoleBinding body to apply.
    :raises CLIError: on any API failure other than an already-exists conflict.
    """
    try:
        # Fix: messages below previously said "ClusterRole"; this helper
        # operates on ClusterRoleBindings.
        logger.info(f"Creating new ClusterRoleBinding '{rolebinding_name}'")
        rbac_api.create_cluster_role_binding(body=rolebinding)
    except client.exceptions.ApiException as e:
        if e.status == 409:
            logger.info(f"ClusterRoleBinding '{rolebinding_name}' already exists, replacing it")
            rbac_api.replace_cluster_role_binding(name=rolebinding_name, body=rolebinding)
        else:
            raise CLIError(f"Failed to create or replace ClusterRoleBinding '{rolebinding_name}': {str(e)}")
def create_openshift_custom_coredns_resources(kube_client, namespace=OPENSHIFT_DNS):
    """Create the custom CoreDNS ClusterRole, ClusterRoleBinding, ConfigMap,
    Deployment and Service in the given OpenShift namespace.

    The Corefile content is copied from the existing coredns-custom ConfigMap
    in kube-system; each resource is applied idempotently.

    :param kube_client: configured kubernetes API client.
    :param namespace: namespace to create the resources in (default: openshift-dns).
    :raises CLIError: when any resource cannot be created or replaced.
    """
    try:
        logger.info("Creating custom CoreDNS resources in OpenShift")
        core_v1_api = client.CoreV1Api(kube_client)
        rbac_api = client.RbacAuthorizationV1Api(kube_client)

        # 1. Create ClusterRole
        cluster_role = client.V1ClusterRole(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS
            ),
            rules=[
                client.V1PolicyRule(
                    api_groups=[""],
                    resources=["services", "endpoints", "pods", "namespaces"],
                    verbs=["list", "watch"]
                ),
                client.V1PolicyRule(
                    api_groups=["discovery.k8s.io"],
                    resources=["endpointslices"],
                    verbs=["list", "watch"]
                )
            ]
        )
        create_or_replace_cluster_role(rbac_api, CUSTOM_CORE_DNS, cluster_role)

        # 2. Create ClusterRoleBinding
        cluster_role_binding = client.V1ClusterRoleBinding(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS
            ),
            role_ref=client.V1RoleRef(
                api_group="rbac.authorization.k8s.io",
                kind="ClusterRole",
                name=CUSTOM_CORE_DNS
            ),
            subjects=[
                client.V1Subject(
                    kind="ServiceAccount",
                    name="default",
                    namespace=namespace
                )
            ]
        )
        create_or_replace_cluster_rolebinding(rbac_api, CUSTOM_CORE_DNS, cluster_role_binding)

        # 3. Create ConfigMap, seeding the Corefile from the existing
        # coredns-custom ConfigMap in kube-system.
        existing_config_map = core_v1_api.read_namespaced_config_map(name=CUSTOM_CORE_DNS, namespace=KUBE_SYSTEM)
        corefile_data = existing_config_map.data.get("k4apps-default.io.server") or existing_config_map.data.get("Corefile")
        if not corefile_data:
            # Fix: use lowercase f-string prefix for consistency with the rest of the file.
            raise ValidationError(f"Neither 'k4apps-default.io.server' nor 'Corefile' key found in the {CUSTOM_CORE_DNS} ConfigMap in {KUBE_SYSTEM} namespace.")

        config_map = client.V1ConfigMap(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS,
                namespace=namespace
            ),
            data={"Corefile": corefile_data}
        )

        create_or_update_configmap(name=CUSTOM_CORE_DNS, namespace=namespace, kube_client=kube_client, configmap=config_map)
        logger.info("Custom CoreDNS ConfigMap created successfully")

        # 4. Create Deployment
        deployment = client.V1Deployment(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS,
                namespace=namespace
            ),
            spec=client.V1DeploymentSpec(
                replicas=1,
                selector=client.V1LabelSelector(
                    match_labels={"app": CUSTOM_CORE_DNS}
                ),
                template=client.V1PodTemplateSpec(
                    metadata=client.V1ObjectMeta(
                        labels={"app": CUSTOM_CORE_DNS}
                    ),
                    spec=client.V1PodSpec(
                        containers=[
                            client.V1Container(
                                name="coredns",
                                # NOTE(review): pinning a specific coredns version instead of
                                # "latest" would make rollouts reproducible — confirm intent.
                                image="coredns/coredns:latest",
                                args=["-conf", "/etc/coredns/Corefile"],
                                volume_mounts=[
                                    client.V1VolumeMount(
                                        name="config-volume",
                                        mount_path="/etc/coredns"
                                    )
                                ]
                            )
                        ],
                        volumes=[
                            client.V1Volume(
                                name="config-volume",
                                config_map=client.V1ConfigMapVolumeSource(
                                    name=CUSTOM_CORE_DNS
                                )
                            )
                        ]
                    )
                )
            )
        )
        create_or_update_deployment(name=CUSTOM_CORE_DNS, namespace=namespace, kube_client=kube_client, deployment=deployment)
        logger.info("Custom CoreDNS Deployment created successfully")

        # 5. Create Service
        service = client.V1Service(
            metadata=client.V1ObjectMeta(
                name=CUSTOM_CORE_DNS,
                namespace=namespace
            ),
            spec=client.V1ServiceSpec(
                selector={"app": CUSTOM_CORE_DNS},
                ports=[
                    client.V1ServicePort(
                        protocol="UDP",
                        port=53,
                        target_port=53
                    )
                ]
            )
        )
        try:
            core_v1_api.create_namespaced_service(namespace=namespace, body=service)
            logger.info("Custom CoreDNS Service created successfully")
        except client.exceptions.ApiException as e:
            # Fix: handle the Service conflict locally so a pre-existing Service
            # no longer aborts the flow via the outer handler with a misleading
            # "resources already exist" message.
            if e.status == 409:
                logger.warning(f"Service '{CUSTOM_CORE_DNS}' already exists, keeping it")
            else:
                raise

    except client.exceptions.ApiException as e:
        if e.status == 409:
            logger.warning("Custom CoreDNS resources already exist")
        else:
            raise CLIError(f"Failed to create custom CoreDNS resources: {str(e)}")
    except Exception as e:
        raise CLIError(f"An error occurred while creating custom CoreDNS resources: {str(e)}")


def patch_openshift_dns_operator(kube_client, domain, original_folder=None):
    """Add a forwarding server for *domain* to the OpenShift DNS operator.

    Saves the current operator config (when *original_folder* is given), then
    appends a forwardPlugin entry pointing at the custom CoreDNS Service's
    cluster IP, unless an entry with the same name already exists.

    :param kube_client: configured kubernetes API client.
    :param domain: DNS zone to forward (also forwards internal.<domain>).
    :param original_folder: optional folder to back up the original config to.
    :raises CLIError: when the operator configuration cannot be patched.
    """
    try:
        logger.info("Patching OpenShift DNS operator to add custom resolver")

        custom_objects_api = client.CustomObjectsApi(kube_client)

        # Fetch (and optionally back up) the existing DNS operator configuration.
        dns_operator_config = get_and_save_openshift_dns_operator_config(kube_client, original_folder)

        coredns_service = client.CoreV1Api(kube_client).read_namespaced_service(name=CUSTOM_CORE_DNS, namespace=OPENSHIFT_DNS)

        # Build the custom resolver entry targeting the custom CoreDNS service.
        servers = dns_operator_config.get("spec", {}).get("servers", [])
        custom_resolver = {
            "name": CUSTOM_CORE_DNS,
            "zones": [domain, f"internal.{domain}"],
            "forwardPlugin": {
                "upstreams": [coredns_service.spec.cluster_ip],
            }
        }

        # Only append when no server with our name is present yet.
        if not any(server.get("name") == CUSTOM_CORE_DNS for server in servers):
            servers.append(custom_resolver)
            # Fix: setdefault avoids a KeyError when the config has no "spec" key
            # (the earlier .get("spec", {}) fallback would not have created it).
            dns_operator_config.setdefault("spec", {})["servers"] = servers

            # Update the DNS operator configuration
            custom_objects_api.patch_cluster_custom_object(
                group="operator.openshift.io",
                version="v1",
                plural="dnses",
                name="default",
                body=dns_operator_config
            )
            logger.info("Successfully patched OpenShift DNS operator with custom resolver")
        else:
            logger.info("Custom resolver already exists in the DNS operator configuration")

    except client.exceptions.ApiException as e:
        raise CLIError(f"Failed to patch DNS operator: {str(e)}")
    except Exception as e:
        raise CLIError(f"An error occurred while patching DNS operator: {str(e)}")
{str(e)}") + except Exception as e: + raise CLIError(f"An error occurred while patching DNS operator: {str(e)}") + + +def extract_domain_from_configmap(kube_client, resource_name=CUSTOM_CORE_DNS, namespace=KUBE_SYSTEM): + import re + + try: + core_v1_api = client.CoreV1Api(kube_client) + configmap = core_v1_api.read_namespaced_config_map(name=CUSTOM_CORE_DNS, namespace=KUBE_SYSTEM) + if configmap is None: + raise ResourceNotFoundError(f"ConfigMap '{resource_name}' not found in namespace '{namespace}'.") + + corefile = configmap.data.get("k4apps-default.io.server") + if not corefile: + raise ValidationError("'k4apps-default.io.server' key found in the coredns-custom ConfigMap in kube-system namespace.") + + # Extract the domain (excluding 'dapr') + for line in corefile.splitlines(): + match = re.match(r'^\s*([a-zA-Z0-9\-\.]+):53\s*{', line) + if match and match.group(1) != "dapr": + return match.group(1) + + raise ValidationError("No valid domain found in CoreDNS configmap data.") + except Exception as e: + logger.error(f"Failed to extract domain from configmap: {str(e)}") + return None + + +def get_and_save_openshift_dns_operator_config(kube_client, folder=None): + try: + custom_objects_api = client.CustomObjectsApi(kube_client) + dns_operator_config = custom_objects_api.get_cluster_custom_object( + group="operator.openshift.io", + version="v1", + plural="dnses", + name="default" + ) + + if folder is not None: + filepath = os.path.join(folder, "openshift-dns-operator-config.json") + with open(filepath, "w") as f: + f.write(json.dumps(dns_operator_config, indent=2)) + logger.info(f"OpenShift DNS operator configuration saved to {filepath}") + + return dns_operator_config + except Exception as e: + raise ValidationError(f"Failed to retrieve OpenShift DNS operator configuration: {str(e)}") + + +def restart_openshift_dns_daemonset(kube_client): + try: + # Get the DaemonSet + apps_v1_api = client.AppsV1Api(kube_client) + daemonset_name = "dns-default" + + try: + 
def restart_openshift_dns_daemonset(kube_client):
    """Rollout-restart the 'dns-default' DaemonSet in the openshift-dns namespace.

    Prompts the user for confirmation, then patches the DaemonSet's pod
    template with a 'kubectl.kubernetes.io/restartedAt' annotation — the same
    mechanism 'kubectl rollout restart' uses.

    :param kube_client: configured kubernetes API client.
    :raises CLIError: when the DaemonSet cannot be patched.
    """
    try:
        # Get the DaemonSet
        apps_v1_api = client.AppsV1Api(kube_client)
        daemonset_name = "dns-default"

        try:
            apps_v1_api.read_namespaced_daemon_set(
                name=daemonset_name,
                namespace=OPENSHIFT_DNS
            )
        except client.exceptions.ApiException as e:
            if e.status == 404:
                logger.warning(f"DaemonSet '{daemonset_name}' not found in namespace '{OPENSHIFT_DNS}'")
                return
            else:
                raise

        logger.info(f"Restarting DaemonSet '{daemonset_name}' in namespace '{OPENSHIFT_DNS}'...")

        try:
            response = input(f"The DNS DaemonSet in namespace '{OPENSHIFT_DNS}' needs to be restarted. Are you sure you want to proceed? (y/n): ")
            confirmed = response.lower() in ['y', 'yes']
        except (EOFError, KeyboardInterrupt):
            # Treat a closed stdin or Ctrl-C as a declined prompt.
            confirmed = False

        if not confirmed:
            logger.info(f"The restart of daemonset was cancelled by the user. Please manually restart the daemonset by running 'kubectl rollout restart daemonset {daemonset_name} -n {OPENSHIFT_DNS}'")
            return

        # Rollout restart - this is exactly what kubectl rollout restart does
        import datetime

        # Fix: datetime.utcnow() is deprecated; use an aware UTC timestamp
        # (same 'YYYY-MM-DDTHH:MM:SSZ' output).
        restart_time = datetime.datetime.now(datetime.timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')

        # The kubectl rollout restart command internally does this exact patch
        patch_body = {
            "spec": {
                "template": {
                    "metadata": {
                        "annotations": {
                            "kubectl.kubernetes.io/restartedAt": restart_time
                        }
                    }
                }
            }
        }

        # Patch the daemon set to trigger a restart
        apps_v1_api.patch_namespaced_daemon_set(
            name=daemonset_name,
            namespace=OPENSHIFT_DNS,
            body=patch_body
        )

        logger.info(f"Successfully initiated restart of DaemonSet '{daemonset_name}'. Pods will be recreated automatically.")

    except client.exceptions.ApiException as e:
        if e.status == 404:
            logger.warning(f"DaemonSet '{daemonset_name}' not found in namespace '{OPENSHIFT_DNS}'")
        else:
            raise CLIError(f"Failed to restart DaemonSet: {str(e)}")
    except Exception as e:
        raise CLIError(f"An error occurred while restarting DaemonSet: {str(e)}")
Dapr component. All other parameters will be ignored. For an example, see https://learn.microsoft.com/en-us/azure/container-apps/dapr-overview?tabs=bicep1%2Cyaml#component-schema') with self.argument_context('containerapp arc setup-core-dns') as c: - c.argument('distro', arg_type=get_enum_type([AKS_AZURE_LOCAL_DISTRO]), required=True, help="The distro supported to setup CoreDNS.") + c.argument('distro', arg_type=get_enum_type([AKS_AZURE_LOCAL_DISTRO, OPENSHIFT_DISTRO]), required=True, help="The distro supported to setup CoreDNS.") c.argument('kube_config', help="Path to the kube config file.") c.argument('kube_context', help="Kube context from current machine.") c.argument('skip_ssl_verification', help="Skip SSL verification for any cluster connection.") diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index f20b7020dd2..bd39dddd5e3 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -131,12 +131,12 @@ from ._utils import (connected_env_check_cert_name_availability, get_oryx_run_image_tags, patchable_check, get_pack_exec_path, is_docker_running, parse_build_env_vars, env_has_managed_identity) -from ._arc_utils import (get_core_dns_deployment, get_core_dns_configmap, backup_custom_core_dns_configmap, +from ._arc_utils import (extract_domain_from_configmap, get_core_dns_deployment, get_core_dns_configmap, backup_custom_core_dns_configmap, replace_configmap, replace_deployment, delete_configmap, patch_coredns, create_folder, create_sub_folder, - check_kube_connection, create_kube_client) + check_kube_connection, create_kube_client, restart_openshift_dns_daemonset) -from ._constants import (CONTAINER_APPS_RP, +from ._constants import (AKS_AZURE_LOCAL_DISTRO, CONTAINER_APPS_RP, NAME_INVALID, NAME_ALREADY_EXISTS, ACR_IMAGE_SUFFIX, DEV_POSTGRES_IMAGE, DEV_POSTGRES_SERVICE_TYPE, DEV_POSTGRES_CONTAINER_NAME, DEV_REDIS_IMAGE, DEV_REDIS_SERVICE_TYPE, 
DEV_REDIS_CONTAINER_NAME, DEV_KAFKA_CONTAINER_NAME, DEV_KAFKA_IMAGE, DEV_KAFKA_SERVICE_TYPE, DEV_MARIADB_CONTAINER_NAME, DEV_MARIADB_IMAGE, DEV_MARIADB_SERVICE_TYPE, DEV_QDRANT_IMAGE, @@ -144,7 +144,7 @@ DEV_MILVUS_IMAGE, DEV_MILVUS_CONTAINER_NAME, DEV_MILVUS_SERVICE_TYPE, DEV_SERVICE_LIST, CONTAINER_APPS_SDK_MODELS, BLOB_STORAGE_TOKEN_STORE_SECRET_SETTING_NAME, DAPR_SUPPORTED_STATESTORE_DEV_SERVICE_LIST, DAPR_SUPPORTED_PUBSUB_DEV_SERVICE_LIST, JAVA_COMPONENT_CONFIG, JAVA_COMPONENT_EUREKA, JAVA_COMPONENT_ADMIN, JAVA_COMPONENT_NACOS, JAVA_COMPONENT_GATEWAY, DOTNET_COMPONENT_RESOURCE_TYPE, - CUSTOM_CORE_DNS, CORE_DNS, KUBE_SYSTEM) + CUSTOM_CORE_DNS, CORE_DNS, KUBE_SYSTEM, OPENSHIFT_DISTRO, OPENSHIFT_DNS) logger = get_logger(__name__) @@ -2180,91 +2180,112 @@ def setup_core_dns(cmd, distro=None, kube_config=None, kube_context=None, skip_s raise ValidationError(error) original_folder, folder_status, error = create_sub_folder(parent_folder, "original") + if not folder_status: + raise ValidationError(error) + new_filepath_with_timestamp, folder_status, error = create_sub_folder(parent_folder, "new") if not folder_status: raise ValidationError(error) kube_client = create_kube_client(kube_config, kube_context, skip_ssl_verification) - # backup original deployment and configmap - logger.info("Backup existing coredns deployment and configmap") - original_coredns_deployment = get_core_dns_deployment(kube_client, original_folder) - coredns_deployment = copy.deepcopy(original_coredns_deployment) - - original_coredns_configmap = get_core_dns_configmap(kube_client, original_folder) - coredns_configmap = copy.deepcopy(original_coredns_configmap) - - volumes = coredns_deployment.spec.template.spec.volumes - if volumes is None: - raise ValidationError('Unexpected Volumes in coredns deployment, Volumes not found') - - volume_mounts = coredns_deployment.spec.template.spec.containers[0].volume_mounts - if volume_mounts is None: - raise ValidationError('Unexpected Volume mounts in 
coredns deployment, VolumeMounts not found') - - coredns_configmap_volume_set = False - custom_coredns_configmap_volume_set = False - custom_coredns_configmap_volume_mounted = False - - for volume in volumes: - if volume.config_map is not None: - if volume.config_map.name == CORE_DNS: - for mount in volume_mounts: - if mount.name is not None and mount.name == volume.name: - coredns_configmap_volume_set = True - break - elif volume.config_map.name == CUSTOM_CORE_DNS: - custom_coredns_configmap_volume_set = True - for mount in volume_mounts: - if mount.name is not None and mount.name == volume.name: - custom_coredns_configmap_volume_mounted = True - break - - if not coredns_configmap_volume_set: - raise ValidationError("Cannot find volume and volume mounts for core dns config map") - - original_custom_core_dns_configmap = backup_custom_core_dns_configmap(kube_client, original_folder) + if distro == AKS_AZURE_LOCAL_DISTRO: + # backup original deployment and configmap + logger.info("Backup existing coredns deployment and configmap") + original_coredns_deployment = get_core_dns_deployment(kube_client, original_folder) + coredns_deployment = copy.deepcopy(original_coredns_deployment) + + original_coredns_configmap = get_core_dns_configmap(kube_client, original_folder) + coredns_configmap = copy.deepcopy(original_coredns_configmap) + + volumes = coredns_deployment.spec.template.spec.volumes + if volumes is None: + raise ValidationError('Unexpected Volumes in coredns deployment, Volumes not found') + + volume_mounts = coredns_deployment.spec.template.spec.containers[0].volume_mounts + if volume_mounts is None: + raise ValidationError('Unexpected Volume mounts in coredns deployment, VolumeMounts not found') + + coredns_configmap_volume_set = False + custom_coredns_configmap_volume_set = False + custom_coredns_configmap_volume_mounted = False + + for volume in volumes: + if volume.config_map is not None: + if volume.config_map.name == CORE_DNS: + for mount in volume_mounts: 
+ if mount.name is not None and mount.name == volume.name: + coredns_configmap_volume_set = True + break + elif volume.config_map.name == CUSTOM_CORE_DNS: + custom_coredns_configmap_volume_set = True + for mount in volume_mounts: + if mount.name is not None and mount.name == volume.name: + custom_coredns_configmap_volume_mounted = True + break + + if not coredns_configmap_volume_set: + raise ValidationError("Cannot find volume and volume mounts for core dns config map") + + original_custom_core_dns_configmap = backup_custom_core_dns_configmap(kube_client, original_folder) - new_filepath_with_timestamp, folder_status, error = create_sub_folder(parent_folder, "new") - if not folder_status: - raise ValidationError(error) + try: + patch_coredns(kube_client, coredns_configmap, coredns_deployment, new_filepath_with_timestamp, + original_custom_core_dns_configmap is not None, not custom_coredns_configmap_volume_set, not custom_coredns_configmap_volume_mounted) + except Exception as e: + logger.error(f"Failed to setup custom coredns. 
{e}") + logger.info("Start to reverted coredns") + replace_succeeded = False + retry_count = 0 + while not replace_succeeded and retry_count < 10: + logger.info(f"Retry the revert operation with retry count {retry_count}") + + try: + logger.info("Start to reverted coredns configmap") + latest_core_dns_configmap = get_core_dns_configmap(kube_client) + latest_core_dns_configmap.data = original_coredns_configmap.data + + replace_configmap(CORE_DNS, KUBE_SYSTEM, kube_client, latest_core_dns_configmap) + logger.info("Reverted coredns configmap successfully") + + logger.info("Start to reverted coredns deployment") + latest_core_dns_deployment = get_core_dns_deployment(kube_client) + latest_core_dns_deployment.spec.template.spec = original_coredns_deployment.spec.template.spec + + replace_deployment(CORE_DNS, KUBE_SYSTEM, kube_client, latest_core_dns_deployment) + logger.info("Reverted coredns deployment successfully") + + if original_custom_core_dns_configmap is None: + delete_configmap(CUSTOM_CORE_DNS, KUBE_SYSTEM, kube_client) + replace_succeeded = True + except Exception as revertEx: + logger.warning(f"Failed to revert coredns configmap or deployment {revertEx}") + retry_count = retry_count + 1 + time.sleep(2) + + if not replace_succeeded: + logger.error(f"Failed to revert the deployment and configuration. " + f"You can get the original coredns config and deployment from {original_folder}") + elif distro == OPENSHIFT_DISTRO: + logger.info("Setting up CoreDNS for OpenShift") + try: - try: - patch_coredns(kube_client, coredns_configmap, coredns_deployment, new_filepath_with_timestamp, - original_custom_core_dns_configmap is not None, not custom_coredns_configmap_volume_set, not custom_coredns_configmap_volume_mounted) - except Exception as e: - logger.error(f"Failed to setup custom coredns. 
{e}") - logger.info("Start to reverted coredns") - replace_succeeded = False - retry_count = 0 - while not replace_succeeded and retry_count < 10: - logger.info(f"Retry the revert operation with retry count {retry_count}") - - try: - logger.info("Start to reverted coredns configmap") - latest_core_dns_configmap = get_core_dns_configmap(kube_client) - latest_core_dns_configmap.data = original_coredns_configmap.data - - replace_configmap(CORE_DNS, KUBE_SYSTEM, kube_client, latest_core_dns_configmap) - logger.info("Reverted coredns configmap successfully") - - logger.info("Start to reverted coredns deployment") - latest_core_dns_deployment = get_core_dns_deployment(kube_client) - latest_core_dns_deployment.spec.template.spec = original_coredns_deployment.spec.template.spec - - replace_deployment(CORE_DNS, KUBE_SYSTEM, kube_client, latest_core_dns_deployment) - logger.info("Reverted coredns deployment successfully") - - if original_custom_core_dns_configmap is None: - delete_configmap(CUSTOM_CORE_DNS, KUBE_SYSTEM, kube_client) - replace_succeeded = True - except Exception as revertEx: - logger.warning(f"Failed to revert coredns configmap or deployment {revertEx}") - retry_count = retry_count + 1 - time.sleep(2) - - if not replace_succeeded: - logger.error(f"Failed to revert the deployment and configuration. " - f"You can get the original coredns config and deployment from {original_folder}") + from ._arc_utils import create_openshift_custom_coredns_resources, patch_openshift_dns_operator + + create_openshift_custom_coredns_resources(kube_client, OPENSHIFT_DNS) + + domain = extract_domain_from_configmap(kube_client) + + # Patch the OpenShift DNS operator to use the custom CoreDNS service + patch_openshift_dns_operator(kube_client, domain, original_folder) + + restart_openshift_dns_daemonset(kube_client) + + logger.info("Successfully set up CoreDNS for OpenShift") + except Exception as e: + logger.error(f"Failed to setup CoreDNS for OpenShift. 
{e}") + raise ValidationError("Failed to setup CoreDNS for OpenShift distro") + else: + raise ValidationError(f"Unsupported distro: {distro}. Supported distros are: {AKS_AZURE_LOCAL_DISTRO}, {OPENSHIFT_DISTRO}.") def init_dapr_components(cmd, resource_group_name, environment_name, statestore="redis", pubsub="redis"): diff --git a/src/containerapp/setup.py b/src/containerapp/setup.py index fa5811e07a0..b0f5800ca53 100644 --- a/src/containerapp/setup.py +++ b/src/containerapp/setup.py @@ -28,7 +28,7 @@ # TODO: Confirm this is the right version number you want and it matches your # HISTORY.rst entry. -VERSION = '1.2.0b1' +VERSION = '1.2.0b2' # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers