diff --git a/_attributes/attributes-openshift-dedicated.adoc b/_attributes/attributes-openshift-dedicated.adoc index 786b18f58cf4..6c79e71e7fea 100644 --- a/_attributes/attributes-openshift-dedicated.adoc +++ b/_attributes/attributes-openshift-dedicated.adoc @@ -16,7 +16,7 @@ :hybrid-console-second: Hybrid Cloud Console :hybrid-console-url: link:https://console.redhat.com[Red Hat Hybrid Cloud Console] :AWS: Amazon Web Services (AWS) -:GCP: Google Cloud Platform (GCP) +:GCP: Google Cloud :openshift-networking: Red Hat OpenShift Networking :product-registry: OpenShift image registry :kebab: image:kebab.png[title="Options menu"] diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 41e8b018f0ae..5b8cca9ba183 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -315,39 +315,39 @@ Topics: File: installation-config-parameters-ash - Name: Uninstalling a cluster File: uninstalling-cluster-azure-stack-hub -- Name: Installing on GCP +- Name: Installing on Google Cloud Dir: installing_gcp Distros: openshift-origin,openshift-enterprise Topics: - - Name: Preparing to install on GCP + - Name: Preparing to install on Google Cloud File: preparing-to-install-on-gcp - - Name: Configuring a GCP project + - Name: Configuring a Google Cloud project File: installing-gcp-account - - Name: Installing a cluster quickly on GCP + - Name: Installing a cluster quickly on Google Cloud File: installing-gcp-default - - Name: Installing a cluster on GCP with customizations + - Name: Installing a cluster on Google Cloud with customizations File: installing-gcp-customizations - - Name: Installing a cluster on GCP in a disconnected environment + - Name: Installing a cluster on Google Cloud in a disconnected environment File: installing-restricted-networks-gcp-installer-provisioned - - Name: Installing a cluster on GCP into an existing VPC + - Name: Installing a cluster on Google Cloud into an existing VPC File: installing-gcp-vpc - - Name: Installing a 
cluster on GCP into a shared VPC + - Name: Installing a cluster on Google Cloud into a shared VPC File: installing-gcp-shared-vpc - - Name: Installing a private cluster on GCP + - Name: Installing a private cluster on Google Cloud File: installing-gcp-private - - Name: Installing a cluster on GCP using Deployment Manager templates + - Name: Installing a cluster on Google Cloud using Deployment Manager templates File: installing-gcp-user-infra - - Name: Installing a cluster into a shared VPC on GCP using Deployment Manager templates + - Name: Installing a cluster into a shared VPC on Google Cloud using Deployment Manager templates File: installing-gcp-user-infra-vpc - - Name: Installing a cluster on GCP in a disconnected environment with user-provisioned infrastructure + - Name: Installing a cluster on Google Cloud in a disconnected environment with user-provisioned infrastructure File: installing-restricted-networks-gcp - - Name: Installing a three-node cluster on GCP + - Name: Installing a three-node cluster on Google Cloud File: installing-gcp-three-node - - Name: Installation configuration parameters for GCP + - Name: Installation configuration parameters for Google Cloud File: installation-config-parameters-gcp - - Name: Uninstalling a cluster on GCP + - Name: Uninstalling a cluster on Google Cloud File: uninstalling-cluster-gcp - - Name: Installing a GCP cluster with the support for configuring multi-architecture compute machines + - Name: Installing a Google Cloud cluster with the support for configuring multi-architecture compute machines File: installing-gcp-multiarch-support - Name: Installing on IBM Cloud Dir: installing_ibm_cloud @@ -701,7 +701,7 @@ Topics: File: creating-multi-arch-compute-nodes-azure - Name: Creating a cluster with multi-architecture compute machines on AWS File: creating-multi-arch-compute-nodes-aws - - Name: Creating a cluster with multi-architecture compute machines on GCP + - Name: Creating a cluster with multi-architecture compute 
machines on Google Cloud File: creating-multi-arch-compute-nodes-gcp - Name: Creating a cluster with multi-architecture compute machines on bare metal, IBM Power, or IBM Z File: creating-multi-arch-compute-nodes-bare-metal @@ -1437,7 +1437,7 @@ Topics: File: nw-creating-dns-records-on-aws - Name: Creating DNS records on Azure File: nw-creating-dns-records-on-azure - - Name: Creating DNS records on GCP + - Name: Creating DNS records on Google Cloud File: nw-creating-dns-records-on-gcp - Name: Creating DNS records on Infoblox File: nw-creating-dns-records-on-infoblox @@ -1871,9 +1871,9 @@ Topics: File: persistent-storage-csi-azure-file - Name: Azure Stack Hub CSI Driver Operator File: persistent-storage-csi-azure-stack-hub - - Name: GCP PD CSI Driver Operator + - Name: Google Cloud PD CSI Driver Operator File: persistent-storage-csi-gcp-pd - - Name: GCP Filestore CSI Driver Operator + - Name: Google Cloud Filestore CSI Driver Operator File: persistent-storage-csi-google-cloud-file - Name: IBM Cloud Block Storage (VPC) CSI Driver Operator File: persistent-storage-csi-ibm-cloud-vpc-block @@ -1920,7 +1920,7 @@ Topics: Topics: - Name: Configuring the registry for AWS user-provisioned infrastructure File: configuring-registry-storage-aws-user-infrastructure - - Name: Configuring the registry for GCP user-provisioned infrastructure + - Name: Configuring the registry for Google Cloud user-provisioned infrastructure File: configuring-registry-storage-gcp-user-infrastructure - Name: Configuring the registry for OpenStack user-provisioned infrastructure File: configuring-registry-storage-openstack-user-infrastructure @@ -2383,7 +2383,7 @@ Topics: File: creating-machineset-azure - Name: Creating a compute machine set on Azure Stack Hub File: creating-machineset-azure-stack-hub - - Name: Creating a compute machine set on GCP + - Name: Creating a compute machine set on Google Cloud File: creating-machineset-gcp - Name: Creating a compute machine set on IBM Cloud File: 
creating-machineset-ibm-cloud @@ -2438,7 +2438,7 @@ Topics: File: cpmso-config-options-aws - Name: Control plane configuration options for Microsoft Azure File: cpmso-config-options-azure - - Name: Control plane configuration options for Google Cloud Platform + - Name: Control plane configuration options for Google Cloud File: cpmso-config-options-gcp - Name: Control plane configuration options for Nutanix File: cpmso-config-options-nutanix @@ -2468,7 +2468,7 @@ Topics: Topics: - Name: Cluster API configuration options for Amazon Web Services File: cluster-api-config-options-aws - - Name: Cluster API configuration options for Google Cloud Platform + - Name: Cluster API configuration options for Google Cloud File: cluster-api-config-options-gcp - Name: Cluster API configuration options for Microsoft Azure File: cluster-api-config-options-azure @@ -2916,7 +2916,7 @@ Topics: File: creating-windows-machineset-aws - Name: Creating a Windows machine set on Azure File: creating-windows-machineset-azure - - Name: Creating a Windows machine set on GCP + - Name: Creating a Windows machine set on Google Cloud File: creating-windows-machineset-gcp - Name: Creating a Windows machine set on Nutanix File: creating-windows-machineset-nutanix @@ -3717,10 +3717,10 @@ Topics: Topics: - Name: Configuring OADP with Azure File: installing-oadp-azure - - Name: Configuring OADP with GCP + - Name: Configuring OADP with Google Cloud Dir: installing Topics: - - Name: Configuring OADP with GCP + - Name: Configuring OADP with Google Cloud File: installing-oadp-gcp - Name: Configuring OADP with MCG Dir: installing diff --git a/architecture/osd-architecture-models-gcp.adoc b/architecture/osd-architecture-models-gcp.adoc index 54565a1d07f8..9afb97550245 100644 --- a/architecture/osd-architecture-models-gcp.adoc +++ b/architecture/osd-architecture-models-gcp.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY -[id="osd-architecture-models-gcp"] -= {product-title} on {GCP} architecture models 
include::_attributes/attributes-openshift-dedicated.adoc[] +[id="osd-architecture-models-gcp"] += {product-title} on {gcp-full} architecture models :context: osd-architecture-models-gcp toc::[] @@ -21,6 +21,6 @@ include::modules/osd-public-architecture-model-gcp.adoc[leveloffset=+1] * xref:../osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc#creating-a-gcp-psc-enabled-private-cluster[Private Service Connect overview] -* xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on GCP with Workload Identity Federation authentication] +* xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on {gcp-full} with Workload Identity Federation authentication] -* xref:../osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc#osd-creating-a-cluster-on-gcp-sa[Creating a cluster on GCP with Service Account authentication] +* xref:../osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc#osd-creating-a-cluster-on-gcp-sa[Creating a cluster on {gcp-full} with Service Account authentication] diff --git a/authentication/bound-service-account-tokens.adoc b/authentication/bound-service-account-tokens.adoc index 791036d2c566..c34c7423f9c1 100644 --- a/authentication/bound-service-account-tokens.adoc +++ b/authentication/bound-service-account-tokens.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can use bound service account tokens, which improves the ability to integrate with cloud provider identity access management (IAM) services, such as {product-title} on AWS IAM or Google Cloud Platform IAM. +You can use bound service account tokens, which improves the ability to integrate with cloud provider identity access management (IAM) services, such as {product-title} on AWS IAM or {gcp-full} IAM. 
// About bound service account tokens include::modules/bound-sa-tokens-about.adoc[leveloffset=+1] diff --git a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc b/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc index cbbe138c66f6..97aa86791439 100644 --- a/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc +++ b/authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc @@ -46,7 +46,7 @@ By setting different values for the `credentialsMode` parameter in the `install- |X | -|Google Cloud Platform (GCP) +|{gcp-full} |X |X |X @@ -98,7 +98,7 @@ include::modules/cco-determine-mode-cli.adoc[leveloffset=+2] [id="about-cloud-credential-operator-default_{context}"] == Default behavior -For platforms on which multiple modes are supported (AWS, Azure, and GCP), when the CCO operates in its default mode, it checks the provided credentials dynamically to determine for which mode they are sufficient to process `CredentialsRequest` CRs. +For platforms on which multiple modes are supported (AWS, Azure, and {gcp-full}), when the CCO operates in its default mode, it checks the provided credentials dynamically to determine for which mode they are sufficient to process `CredentialsRequest` CRs. By default, the CCO determines whether the credentials are sufficient for mint mode, which is the preferred mode of operation, and uses those credentials to create appropriate credentials for components in the cluster. If the credentials are not sufficient for mint mode, it determines whether they are sufficient for passthrough mode. If the credentials are not sufficient for passthrough mode, the CCO cannot adequately process `CredentialsRequest` CRs. 
diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc index 04ddfd66ff27..728ade2de104 100644 --- a/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc +++ b/authentication/managing_cloud_provider_credentials/cco-mode-manual.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -Manual mode is supported for Amazon Web Services (AWS), global Microsoft Azure, Microsoft Azure Stack Hub, Google Cloud Platform (GCP), {ibm-cloud-name}, and Nutanix. +Manual mode is supported for Amazon Web Services (AWS), global Microsoft Azure, Microsoft Azure Stack Hub, {gcp-full}, {ibm-cloud-name}, and Nutanix. [id="manual-mode-classic_{context}"] == User-managed credentials @@ -19,7 +19,7 @@ For information about configuring your cloud provider to use manual mode, see th [NOTE] ==== -An AWS, global Azure, or GCP cluster that uses manual mode might be configured to use short-term credentials for different components. For more information, see xref:../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components]. +An AWS, global Azure, or {gcp-full} cluster that uses manual mode might be configured to use short-term credentials for different components. For more information, see xref:../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components]. 
==== [role="_additional-resources"] @@ -28,7 +28,7 @@ An AWS, global Azure, or GCP cluster that uses manual mode might be configured t * xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS] * xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure] -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for {gcp-full}] * xref:../../installing/installing_ibm_cloud/configuring-iam-ibm-cloud.adoc#configuring-iam-ibm-cloud[Configuring IAM for {ibm-cloud-name}] * xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#manually-create-iam-nutanix_installing-nutanix-installer-provisioned[Configuring IAM for Nutanix] * xref:../../authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc#cco-short-term-creds[Manual mode with short-term credentials for components] diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc index b7bb544055db..d2ca856abc66 100644 --- a/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc +++ b/authentication/managing_cloud_provider_credentials/cco-mode-mint.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -Mint mode is the default Cloud Credential Operator (CCO) credentials mode for {product-title} on platforms that support it. Mint mode supports Amazon Web Services (AWS) and Google Cloud Platform (GCP) clusters. 
+Mint mode is the default Cloud Credential Operator (CCO) credentials mode for {product-title} on platforms that support it. Mint mode supports Amazon Web Services (AWS) and {gcp-full} clusters. [id="mint-mode-about"] == Mint mode credentials management @@ -47,9 +47,9 @@ The credential you provide for mint mode in Amazon Web Services (AWS) must have * `iam:SimulatePrincipalPolicy` ==== -The credential you provide for mint mode in Google Cloud Platform (GCP) must have the following permissions: +The credential you provide for mint mode in {gcp-full} must have the following permissions: -.Required GCP permissions +.Required {gcp-full} permissions [%collapsible] ==== * `resourcemanager.projects.get` diff --git a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc b/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc index 3598fdcce782..05480a12116b 100644 --- a/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc +++ b/authentication/managing_cloud_provider_credentials/cco-mode-passthrough.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -Passthrough mode is supported for Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, and VMware vSphere. +Passthrough mode is supported for Amazon Web Services (AWS), Microsoft Azure, {gcp-full}, {rh-openstack-first}, and VMware vSphere. In passthrough mode, the Cloud Credential Operator (CCO) passes the provided cloud credential to the components that request cloud credentials. The credential must have permissions to perform the installation and complete the operations that are required by components in the cluster, but does not need to be able to create new credentials. The CCO does not attempt to create additional limited-scoped credentials in passthrough mode. 
@@ -32,10 +32,10 @@ The credential you provide for passthrough mode in Azure must have all the reque To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure]. [id="passthrough-mode-permissions-gcp"] -=== Google Cloud Platform (GCP) permissions -The credential you provide for passthrough mode in GCP must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. +=== {gcp-full} permissions +The credential you provide for passthrough mode in {gcp-full} must have all the requested permissions for all `CredentialsRequest` CRs that are required by the version of {product-title} you are running or installing. -To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP]. +To locate the `CredentialsRequest` CRs that are required, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for {gcp-full}]. [id="passthrough-mode-permissions-rhosp"] === {rh-openstack-first} permissions @@ -81,7 +81,7 @@ include::modules/admin-credentials-root-secret-formats.adoc[leveloffset=+1] [id="passthrough-mode-maintenance"] == Passthrough mode credential maintenance -If `CredentialsRequest` CRs change over time as the cluster is upgraded, you must manually update the passthrough mode credential to meet the requirements. To avoid credentials issues during an upgrade, check the `CredentialsRequest` CRs in the release image for the new version of {product-title} before upgrading. 
To locate the `CredentialsRequest` CRs that are required for your cloud provider, see _Manually creating long-term credentials_ for xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[AWS], xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Azure], or xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[GCP]. +If `CredentialsRequest` CRs change over time as the cluster is upgraded, you must manually update the passthrough mode credential to meet the requirements. To avoid credentials issues during an upgrade, check the `CredentialsRequest` CRs in the release image for the new version of {product-title} before upgrading. To locate the `CredentialsRequest` CRs that are required for your cloud provider, see _Manually creating long-term credentials_ for xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[AWS], xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Azure], or xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[{gcp-full}]. //Rotating cloud provider credentials manually include::modules/manually-rotating-cloud-creds.adoc[leveloffset=+2] @@ -96,11 +96,11 @@ When using passthrough mode, each component has the same permissions used by all After installation, you can reduce the permissions on your credential to only those that are required to run the cluster, as defined by the `CredentialsRequest` CRs in the release image for the version of {product-title} that you are using. 
-To locate the `CredentialsRequest` CRs that are required for AWS, Azure, or GCP and learn how to change the permissions the CCO uses, see _Manually creating long-term credentials_ for xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[AWS], xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Azure], or xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[GCP]. +To locate the `CredentialsRequest` CRs that are required for AWS, Azure, or {gcp-full} and learn how to change the permissions the CCO uses, see _Manually creating long-term credentials_ for xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[AWS], xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Azure], or xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[{gcp-full}]. 
[role="_additional-resources"] == Additional resources * xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS] * xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure] -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for {gcp-full}] diff --git a/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc b/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc index 0576c46de0b9..96db00536692 100644 --- a/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc +++ b/authentication/managing_cloud_provider_credentials/cco-short-term-creds.adoc @@ -97,14 +97,14 @@ include::modules/cco-short-term-creds-format-gcp.adoc[leveloffset=+2] //GCP component secret permissions requirements (placeholder) //include::modules/cco-short-term-creds-component-permissions-gcp.adoc[leveloffset=+2] -//OLM-managed Operator support for authentication with GCP Workload Identity +//OLM-managed Operator support for authentication with {gcp-full} Workload Identity include::modules/cco-short-term-creds-gcp-olm.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources * xref:../../operators/operator_sdk/token_auth/osdk-cco-gcp.adoc#osdk-cco-gcp[CCO-based workflow for OLM-managed Operators with {gcp-wid-first}] -// Application support for GCP Workload Identity service account tokens +// Application support for {gcp-full} Workload Identity service account tokens // Extra context so module can 
be reused within assembly (unset in module) :context: gcp // Attributes used in module with cloud-specific values (unset in module) diff --git a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc b/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc index 2a95cbdfcd9a..0ca14ff20c59 100644 --- a/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc +++ b/backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc @@ -18,7 +18,7 @@ To back up Kubernetes resources and internal images, you must have object storag * xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#installing-oadp-aws[Amazon Web Services] * xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#installing-oadp-azure[Microsoft Azure] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[Google Cloud Platform] +* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[{gcp-full}] * xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway] * {ibm-cloud-name} Object Storage S3 * AWS S3 compatible object storage, such as Multicloud Object Gateway or MinIO @@ -33,7 +33,7 @@ To back up PVs with snapshots, you must have a cloud provider that supports eith * xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-aws.adoc#installing-oadp-aws[Amazon Web Services] * xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-azure.adoc#installing-oadp-azure[Microsoft Azure] -* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[Google 
Cloud Platform] +* xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc#installing-oadp-gcp[{gcp-full}] * CSI snapshot-enabled cloud provider, such as xref:../../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc#installing-oadp-ocs[OpenShift Data Foundation] include::snippets/oadp-ocp-compat.adoc[] diff --git a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc index 8fc764e5aedf..f1d8785da00e 100644 --- a/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc +++ b/backup_and_restore/application_backup_and_restore/installing/installing-oadp-gcp.adoc @@ -1,6 +1,6 @@ :_mod-docs-content-type: ASSEMBLY [id="installing-oadp-gcp"] -= Configuring the OpenShift API for Data Protection with Google Cloud Platform += Configuring the OpenShift API for Data Protection with {gcp-full} include::_attributes/common-attributes.adoc[] :context: installing-oadp-gcp :installing-oadp-gcp: @@ -10,11 +10,11 @@ include::_attributes/common-attributes.adoc[] toc::[] [role="_abstract"] -You install the OpenShift API for Data Protection (OADP) with Google Cloud Platform (GCP) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. +You install the OpenShift API for Data Protection (OADP) with {gcp-full} by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. include::snippets/oadp-mtc-operator.adoc[] -You configure GCP for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. 
+You configure {gcp-full} for Velero, create a default `Secret`, and then install the Data Protection Application. For more details, see xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-installing-operator.adoc#oadp-installing-operator-doc[Installing the OADP Operator]. To install the OADP Operator in a restricted network environment, you must first disable the default software catalog sources and mirror the Operator catalog. See xref:../../../disconnected/using-olm.adoc#olm-restricted-networks[Using Operator Lifecycle Manager in disconnected environments] for details. diff --git a/backup_and_restore/index.adoc b/backup_and_restore/index.adoc index 3ade386b3f34..fe20b3609820 100644 --- a/backup_and_restore/index.adoc +++ b/backup_and_restore/index.adoc @@ -51,7 +51,7 @@ OADP has the following requirements: ** OpenShift Data Foundation ** Amazon Web Services ** Microsoft Azure -** Google Cloud Platform +** {gcp-full} ** S3-compatible object storage ** {ibm-cloud-name} Object Storage S3 @@ -61,7 +61,7 @@ include::snippets/oadp-ocp-compat.adoc[] ** Amazon Web Services ** Microsoft Azure -** Google Cloud Platform +** {gcp-full} ** CSI snapshot-enabled cloud storage, such as Ceph RBD or Ceph FS [NOTE] diff --git a/installing/installing_gcp/installation-config-parameters-gcp.adoc b/installing/installing_gcp/installation-config-parameters-gcp.adoc index ecd4e7a38a09..15fbf960ce83 100644 --- a/installing/installing_gcp/installation-config-parameters-gcp.adoc +++ b/installing/installing_gcp/installation-config-parameters-gcp.adoc @@ -1,12 +1,12 @@ :_mod-docs-content-type: ASSEMBLY -[id="installation-config-parameters-gcp"] -= Installation configuration parameters for GCP include::_attributes/common-attributes.adoc[] +[id="installation-config-parameters-gcp"] += Installation configuration parameters for {gcp-full} :context: installation-config-parameters-gcp :platform: GCP toc::[] -Before you deploy an {product-title} cluster on Google Cloud 
Platform (GCP), you provide parameters to customize your cluster and the platform that hosts it. When you create the `install-config.yaml` file, you provide values for the required parameters through the command line. You can then modify the `install-config.yaml` file to customize your cluster further. +Before you deploy an {product-title} cluster on {gcp-full}, you provide parameters to customize your cluster and the platform that hosts it. When you create the `install-config.yaml` file, you provide values for the required parameters through the command line. You can then modify the `install-config.yaml` file to customize your cluster further. include::modules/installation-configuration-parameters.adoc[leveloffset=+1] diff --git a/installing/installing_gcp/installing-gcp-account.adoc b/installing/installing_gcp/installing-gcp-account.adoc index e9219f122445..bd827110f04d 100644 --- a/installing/installing_gcp/installing-gcp-account.adoc +++ b/installing/installing_gcp/installing-gcp-account.adoc @@ -1,13 +1,13 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-gcp-account"] -= Configuring a GCP project include::_attributes/common-attributes.adoc[] +[id="installing-gcp-account"] += Configuring a {gcp-full} project :context: installing-gcp-account toc::[] Before you can install {product-title}, you must configure a -Google Cloud Platform (GCP) project to host it. +{gcp-full} project to host it. include::modules/installation-gcp-project.adoc[leveloffset=+1] @@ -31,7 +31,7 @@ include::modules/installation-gcp-regions.adoc[leveloffset=+1] == Next steps -* Install an {product-title} cluster on GCP. You can +* Install an {product-title} cluster on {gcp-full}. You can xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[install a customized cluster] or xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[quickly install a cluster] with default options. 
diff --git a/installing/installing_gcp/installing-gcp-customizations.adoc b/installing/installing_gcp/installing-gcp-customizations.adoc index 7a83c7ad0084..5579ad787850 100644 --- a/installing/installing_gcp/installing-gcp-customizations.adoc +++ b/installing/installing_gcp/installing-gcp-customizations.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-gcp-customizations"] -= Installing a cluster on GCP with customizations include::_attributes/common-attributes.adoc[] +[id="installing-gcp-customizations"] += Installing a cluster on {gcp-full} with customizations :context: installing-gcp-customizations :platform: GCP @@ -73,10 +73,10 @@ include::modules/installing-gcp-user-defined-labels-and-tags.adoc[leveloffset=+1 // Criteria for user-defined labels and tags include::modules/installing-gcp-cluster-label-tag-reference.adoc[leveloffset=+2] -//Configuring user-defined labels and tags for GCP +//Configuring user-defined labels and tags for {gcp-full} include::modules/installing-gcp-cluster-creation.adoc[leveloffset=+2] -//Querying user-defined labels and tags for GCP +//Querying user-defined labels and tags for {gcp-full} include::modules/installing-gcp-querying-labels-tags-gcp.adoc[leveloffset=+2] //Installing the OpenShift CLI by downloading the binary: Moved up to precede `ccoctl` steps, which require the use of `oc` @@ -94,7 +94,7 @@ By default, administrator secrets are stored in the `kube-system` project. 
If yo //Manually creating long-term credentials include::modules/manually-create-identity-access-management.adoc[leveloffset=+2] -//Supertask: Configuring a GCP cluster to use short-term credentials +//Supertask: Configuring a {gcp-full} cluster to use short-term credentials [id="installing-gcp-with-short-term-creds_{context}"] === Configuring a {gcp-short} cluster to use short-term credentials @@ -103,7 +103,7 @@ To install a cluster that is configured to use {gcp-short} Workload Identity, yo //Task part 1: Configuring the Cloud Credential Operator utility include::modules/cco-ccoctl-configuring.adoc[leveloffset=+3] -//Task part 2: Creating the required GCP resources +//Task part 2: Creating the required {gcp-full} resources include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3] //Task part 3: Incorporating the Cloud Credential Operator utility manifests diff --git a/installing/installing_gcp/installing-gcp-default.adoc b/installing/installing_gcp/installing-gcp-default.adoc index 2c4a0b4abafb..bb3b228041f6 100644 --- a/installing/installing_gcp/installing-gcp-default.adoc +++ b/installing/installing_gcp/installing-gcp-default.adoc @@ -1,19 +1,19 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-gcp-default"] -= Installing a cluster quickly on GCP include::_attributes/common-attributes.adoc[] +[id="installing-gcp-default"] += Installing a cluster quickly on {gcp-full} :context: installing-gcp-default toc::[] In {product-title} version {product-version}, you can install a cluster on -Google Cloud Platform (GCP) that uses the default configuration options. +{gcp-full} that uses the default configuration options. == Prerequisites * You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. 
* You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. +* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a {gcp-full} project] to host the cluster. * If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. include::modules/cluster-entitlements.adoc[leveloffset=+1] diff --git a/installing/installing_gcp/installing-gcp-multiarch-support.adoc b/installing/installing_gcp/installing-gcp-multiarch-support.adoc index dddf487fb885..6ccc32d41c47 100644 --- a/installing/installing_gcp/installing-gcp-multiarch-support.adoc +++ b/installing/installing_gcp/installing-gcp-multiarch-support.adoc @@ -13,7 +13,7 @@ An {product-title} cluster with multi-architecture compute machines supports com When you have nodes with multiple architectures in your cluster, the architecture of your image must be consistent with the architecture of the node. You must ensure that the pod is assigned to the node with the appropriate architecture and that it matches the image architecture. For more information on assigning pods to nodes, xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/multi-architecture-compute-managing.adoc#scheduling-workloads-on-clusters-with-multi-architecture-compute-machines[Scheduling workloads on clusters with multi-architecture compute machines]. ==== -You can install a {gcp-first} cluster with the support for configuring multi-architecture compute machines. 
After installing the GCP cluster, you can add multi-architecture compute machines to the cluster in the following ways: +You can install a {gcp-first} cluster with the support for configuring multi-architecture compute machines. After installing the {gcp-full} cluster, you can add multi-architecture compute machines to the cluster in the following ways: * Adding 64-bit x86 compute machines to a cluster that uses 64-bit ARM control plane machines and already includes 64-bit ARM compute machines. In this case, 64-bit x86 is considered the secondary architecture. * Adding 64-bit ARM compute machines to a cluster that uses 64-bit x86 control plane machines and already includes 64-bit x86 compute machines. In this case, 64-bit ARM is considered the secondary architecture. diff --git a/installing/installing_gcp/installing-gcp-network-customizations.adoc b/installing/installing_gcp/installing-gcp-network-customizations.adoc index fa1dfc101849..063f7d914343 100644 --- a/installing/installing_gcp/installing-gcp-network-customizations.adoc +++ b/installing/installing_gcp/installing-gcp-network-customizations.adoc @@ -1,14 +1,14 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-gcp-network-customizations"] -= Installing a cluster on GCP with network customizations include::_attributes/common-attributes.adoc[] +[id="installing-gcp-network-customizations"] += Installing a cluster on {gcp-full} with network customizations :context: installing-gcp-network-customizations toc::[] In {product-title} version {product-version}, you can install a cluster with a customized network configuration on infrastructure that the installation program -provisions on Google Cloud Platform (GCP). By customizing your network +provisions on {gcp-full}. By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. 
To customize the installation, you modify parameters in the `install-config.yaml` @@ -22,7 +22,7 @@ cluster. * You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. * You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. +* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a {gcp-full} project] to host the cluster. * If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. include::modules/cluster-entitlements.adoc[leveloffset=+1] @@ -35,7 +35,7 @@ include::modules/installation-initializing.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] +* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for {gcp-full}] include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] @@ -83,21 +83,21 @@ By default, administrator secrets are stored in the `kube-system` project. If yo * To manage long-term cloud credentials manually, follow the procedure in xref:../../installing/installing_gcp/installing-gcp-network-customizations.adoc#manually-create-iam_installing-gcp-network-customizations[Manually creating long-term credentials]. 
-* To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in xref:../../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-with-short-term-creds_installing-gcp-network-customizations[Configuring a GCP cluster to use short-term credentials]. +* To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in xref:../../installing/installing_gcp/installing-gcp-network-customizations.adoc#installing-gcp-with-short-term-creds_installing-gcp-network-customizations[Configuring a {gcp-short} cluster to use short-term credentials]. //Manually creating long-term credentials include::modules/manually-create-identity-access-management.adoc[leveloffset=+2] -//Supertask: Configuring a GCP cluster to use short-term credentials +//Supertask: Configuring a {gcp-full} cluster to use short-term credentials [id="installing-gcp-with-short-term-creds_{context}"] -=== Configuring a GCP cluster to use short-term credentials +=== Configuring a {gcp-short} cluster to use short-term credentials -To install a cluster that is configured to use GCP Workload Identity, you must configure the CCO utility and create the required GCP resources for your cluster. +To install a cluster that is configured to use {gcp-short} Workload Identity, you must configure the CCO utility and create the required {gcp-full} resources for your cluster. 
//Task part 1: Configuring the Cloud Credential Operator utility include::modules/cco-ccoctl-configuring.adoc[leveloffset=+3] -//Task part 2: Creating the required GCP resources +//Task part 2: Creating the required {gcp-full} resources include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3] //Task part 3: Incorporating the Cloud Credential Operator utility manifests diff --git a/installing/installing_gcp/installing-gcp-private.adoc b/installing/installing_gcp/installing-gcp-private.adoc index c8a8bfa12946..04b261e1f9dd 100644 --- a/installing/installing_gcp/installing-gcp-private.adoc +++ b/installing/installing_gcp/installing-gcp-private.adoc @@ -1,19 +1,19 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-gcp-private"] -= Installing a private cluster on GCP include::_attributes/common-attributes.adoc[] +[id="installing-gcp-private"] += Installing a private cluster on {gcp-full} :context: installing-gcp-private toc::[] -In {product-title} version {product-version}, you can install a private cluster into an existing VPC on Google Cloud Platform (GCP). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify +In {product-title} version {product-version}, you can install a private cluster into an existing VPC on {gcp-full}. The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. == Prerequisites * You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. * You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. +* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a {gcp-full} project] to host the cluster. * If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. include::modules/private-clusters-default.adoc[leveloffset=+1] @@ -32,7 +32,7 @@ include::modules/installation-initializing-manual.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] +* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for {gcp-full}] include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] @@ -82,21 +82,21 @@ By default, administrator secrets are stored in the `kube-system` project. If yo * To manage long-term cloud credentials manually, follow the procedure in xref:../../installing/installing_gcp/installing-gcp-private.adoc#manually-create-iam_installing-gcp-private[Manually creating long-term credentials]. -* To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in xref:../../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-with-short-term-creds_installing-gcp-private[Configuring a GCP cluster to use short-term credentials]. 
+* To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in xref:../../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-with-short-term-creds_installing-gcp-private[Configuring a {gcp-short} cluster to use short-term credentials]. //Manually creating long-term credentials include::modules/manually-create-identity-access-management.adoc[leveloffset=+2] -//Supertask: Configuring a GCP cluster to use short-term credentials +//Supertask: Configuring a {gcp-full} cluster to use short-term credentials [id="installing-gcp-with-short-term-creds_{context}"] -=== Configuring a GCP cluster to use short-term credentials +=== Configuring a {gcp-short} cluster to use short-term credentials -To install a cluster that is configured to use GCP Workload Identity, you must configure the CCO utility and create the required GCP resources for your cluster. +To install a cluster that is configured to use {gcp-short} Workload Identity, you must configure the CCO utility and create the required {gcp-full} resources for your cluster. 
//Task part 1: Configuring the Cloud Credential Operator utility include::modules/cco-ccoctl-configuring.adoc[leveloffset=+3] -//Task part 2: Creating the required GCP resources +//Task part 2: Creating the required {gcp-full} resources include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3] //Task part 3: Incorporating the Cloud Credential Operator utility manifests diff --git a/installing/installing_gcp/installing-gcp-shared-vpc.adoc b/installing/installing_gcp/installing-gcp-shared-vpc.adoc index e66f7751b94e..d09d2479b8b4 100644 --- a/installing/installing_gcp/installing-gcp-shared-vpc.adoc +++ b/installing/installing_gcp/installing-gcp-shared-vpc.adoc @@ -3,7 +3,7 @@ = Installing a cluster on {gcp-short} into a shared VPC include::_attributes/common-attributes.adoc[] :context: installing-gcp-shared-vpc -:FeatureName: Installing a cluster on GCP into a shared VPC +:FeatureName: Installing a cluster on {gcp-full} into a shared VPC toc::[] @@ -69,7 +69,7 @@ By default, administrator secrets are stored in the `kube-system` project. 
If yo //Manually creating long-term credentials include::modules/manually-create-identity-access-management.adoc[leveloffset=+2] -//Supertask: Configuring a GCP cluster to use short-term credentials +//Supertask: Configuring a {gcp-full} cluster to use short-term credentials [id="installing-gcp-with-short-term-creds_{context}"] === Configuring a {gcp-short} cluster to use short-term credentials @@ -78,7 +78,7 @@ To install a cluster that is configured to use {gcp-short} Workload Identity, yo //Task part 1: Configuring the Cloud Credential Operator utility include::modules/cco-ccoctl-configuring.adoc[leveloffset=+3] -//Task part 2: Creating the required GCP resources +//Task part 2: Creating the required {gcp-full} resources include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3] //Task part 3: Incorporating the Cloud Credential Operator utility manifests diff --git a/installing/installing_gcp/installing-gcp-three-node.adoc b/installing/installing_gcp/installing-gcp-three-node.adoc index aa7c4dfc3535..4a77390780d2 100644 --- a/installing/installing_gcp/installing-gcp-three-node.adoc +++ b/installing/installing_gcp/installing-gcp-three-node.adoc @@ -1,17 +1,17 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-gcp-three-node"] -= Installing a three-node cluster on GCP include::_attributes/common-attributes.adoc[] +[id="installing-gcp-three-node"] += Installing a three-node cluster on {gcp-full} :context: installing-gcp-three-node toc::[] -In {product-title} version {product-version}, you can install a three-node cluster on Google Cloud Platform (GCP). A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource efficient cluster, for cluster administrators and developers to use for testing, development, and production. +In {product-title} version {product-version}, you can install a three-node cluster on {gcp-full}. 
A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource efficient cluster, for cluster administrators and developers to use for testing, development, and production. You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure. include::modules/installation-three-node-cluster-cloud-provider.adoc[leveloffset=+1] == Next steps -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on GCP with customizations] -* xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on {gcp-full} with customizations] +* xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[Installing a cluster on user-provisioned infrastructure in {gcp-full} by using Deployment Manager templates] diff --git a/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc b/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc index 2e7ed420ef0b..0e863f4287c8 100644 --- a/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc +++ b/installing/installing_gcp/installing-gcp-user-infra-vpc.adoc @@ -1,14 +1,14 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-gcp-user-infra-vpc"] -= Installing a cluster into a shared VPC on GCP using Deployment Manager templates include::_attributes/common-attributes.adoc[] +[id="installing-gcp-user-infra-vpc"] += Installing a cluster into a shared VPC on {gcp-full} using Deployment Manager templates :context: installing-gcp-user-infra-vpc toc::[] -In {product-title} version {product-version}, you can install a cluster into a shared Virtual 
Private Cloud (VPC) on Google Cloud Platform (GCP) that uses infrastructure that you provide. In this context, a cluster installed into a shared VPC is a cluster that is configured to use a VPC from a project different from where the cluster is being deployed. +In {product-title} version {product-version}, you can install a cluster into a shared Virtual Private Cloud (VPC) on {gcp-full} that uses infrastructure that you provide. In this context, a cluster installed into a shared VPC is a cluster that is configured to use a VPC from a project different from where the cluster is being deployed. -A shared VPC enables an organization to connect resources from multiple projects to a common VPC network. You can communicate within the organization securely and efficiently by using internal IPs from that network. For more information about shared VPC, see link:https://cloud.google.com/vpc/docs/shared-vpc[Shared VPC overview] in the GCP documentation. +A shared VPC enables an organization to connect resources from multiple projects to a common VPC network. You can communicate within the organization securely and efficiently by using internal IPs from that network. For more information about shared VPC, see link:https://cloud.google.com/vpc/docs/shared-vpc[Shared VPC overview] in the {gcp-full} documentation. The steps for performing a user-provided infrastructure installation into a shared VPC are outlined here. Several link:https://cloud.google.com/deployment-manager/docs[Deployment Manager] templates are provided to assist in @@ -38,10 +38,9 @@ include::modules/csr-management.adoc[leveloffset=+1] include::modules/cluster-entitlements.adoc[leveloffset=+1] [id="installation-gcp-user-infra-config-project-vpc"] -== Configuring the GCP project that hosts your cluster +== Configuring the {gcp-full} project that hosts your cluster -Before you can install {product-title}, you must configure a Google Cloud -Platform (GCP) project to host it. 
+Before you can install {product-title}, you must configure a {gcp-full} project to host it. include::modules/installation-gcp-project.adoc[leveloffset=+2] include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+2] @@ -81,7 +80,7 @@ include::modules/installation-initializing-manual.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] +* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for {gcp-full}] include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] @@ -132,7 +131,7 @@ include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] -// Removing bootstrap resources in GCP +// Removing bootstrap resources in {gcp-full} include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] @@ -148,7 +147,7 @@ include::modules/installation-gcp-user-infra-adding-ingress.adoc[leveloffset=+1] [id="installation-gcp-user-infra-vpc-adding-firewall-rules"] == Adding ingress firewall rules -The cluster requires several firewall rules. If you do not use a shared VPC, these rules are created by the Ingress Controller via the GCP cloud provider. When you use a shared VPC, you can either create cluster-wide firewall rules for all services now or create each rule based on events, when the cluster requests access. By creating each rule when the cluster requests access, you know exactly which firewall rules are required. 
By creating cluster-wide firewall rules, you can apply the same rule set across multiple clusters. +The cluster requires several firewall rules. If you do not use a shared VPC, these rules are created by the Ingress Controller via the {gcp-full} cloud provider. When you use a shared VPC, you can either create cluster-wide firewall rules for all services now or create each rule based on events, when the cluster requests access. By creating each rule when the cluster requests access, you know exactly which firewall rules are required. By creating cluster-wide firewall rules, you can apply the same rule set across multiple clusters. If you choose to create each rule based on events, you must create firewall rules after you provision the cluster and during the life of the cluster when the console notifies you that rules are missing. Events that are similar to the following event are displayed, and you must add the firewall rules that are required: diff --git a/installing/installing_gcp/installing-gcp-user-infra.adoc b/installing/installing_gcp/installing-gcp-user-infra.adoc index 5e167043d16b..43ab37d2b6cb 100644 --- a/installing/installing_gcp/installing-gcp-user-infra.adoc +++ b/installing/installing_gcp/installing-gcp-user-infra.adoc @@ -1,13 +1,13 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-gcp-user-infra"] -= Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates include::_attributes/common-attributes.adoc[] +[id="installing-gcp-user-infra"] += Installing a cluster on user-provisioned infrastructure in {gcp-full} by using Deployment Manager templates :context: installing-gcp-user-infra :platform: GCP toc::[] -In {product-title} version {product-version}, you can install a cluster on Google Cloud Platform (GCP) that uses infrastructure that you provide. +In {product-title} version {product-version}, you can install a cluster on {gcp-full} that uses infrastructure that you provide. 
The steps for performing a user-provided infrastructure install are outlined here. Several link:https://cloud.google.com/deployment-manager/docs[Deployment Manager] templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods. @@ -33,9 +33,9 @@ include::modules/csr-management.adoc[leveloffset=+1] include::modules/cluster-entitlements.adoc[leveloffset=+1] [id="installation-gcp-user-infra-config-project"] -== Configuring your GCP project +== Configuring your {gcp-full} project -Before you can install {product-title}, you must configure a Google Cloud Platform (GCP) project to host it. +Before you can install {product-title}, you must configure a {gcp-full} project to host it. include::modules/installation-gcp-project.adoc[leveloffset=+2] include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+2] @@ -79,7 +79,7 @@ include::modules/installation-initializing.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] +* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for {gcp-full}] include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] @@ -129,7 +129,7 @@ include::modules/installation-deployment-manager-bootstrap.adoc[leveloffset=+2] include::modules/installation-creating-gcp-control-plane.adoc[leveloffset=+1] include::modules/installation-deployment-manager-control-plane.adoc[leveloffset=+2] -// Removing bootstrap resources in GCP +// Removing bootstrap resources in {gcp-full} include::modules/installation-gcp-user-infra-wait-for-bootstrap.adoc[leveloffset=+1] 
include::modules/installation-creating-gcp-worker.adoc[leveloffset=+1] diff --git a/installing/installing_gcp/installing-gcp-vpc.adoc b/installing/installing_gcp/installing-gcp-vpc.adoc index 7fd8a8c5f54c..bbc5d2780a6d 100644 --- a/installing/installing_gcp/installing-gcp-vpc.adoc +++ b/installing/installing_gcp/installing-gcp-vpc.adoc @@ -1,19 +1,19 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-gcp-vpc"] -= Installing a cluster on GCP into an existing VPC include::_attributes/common-attributes.adoc[] +[id="installing-gcp-vpc"] += Installing a cluster on {gcp-full} into an existing VPC :context: installing-gcp-vpc toc::[] -In {product-title} version {product-version}, you can install a cluster into an existing Virtual Private Cloud (VPC) on Google Cloud Platform (GCP). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify +In {product-title} version {product-version}, you can install a cluster into an existing Virtual Private Cloud (VPC) on {gcp-full}. The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the `install-config.yaml` file before you install the cluster. == Prerequisites * You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. * You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. +* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a {gcp-full} project] to host the cluster. 
* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. include::modules/installation-custom-gcp-vpc.adoc[leveloffset=+1] @@ -28,7 +28,7 @@ include::modules/installation-initializing.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] +* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for {gcp-full}] include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] @@ -78,21 +78,21 @@ By default, administrator secrets are stored in the `kube-system` project. If yo * To manage long-term cloud credentials manually, follow the procedure in xref:../../installing/installing_gcp/installing-gcp-vpc.adoc#manually-create-iam_installing-gcp-vpc[Manually creating long-term credentials]. -* To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in xref:../../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-with-short-term-creds_installing-gcp-vpc[Configuring a GCP cluster to use short-term credentials]. +* To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in xref:../../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-with-short-term-creds_installing-gcp-vpc[Configuring a {gcp-full} cluster to use short-term credentials]. 
//Manually creating long-term credentials include::modules/manually-create-identity-access-management.adoc[leveloffset=+2] -//Supertask: Configuring a GCP cluster to use short-term credentials +//Supertask: Configuring a {gcp-full} cluster to use short-term credentials [id="installing-gcp-with-short-term-creds_{context}"] -=== Configuring a GCP cluster to use short-term credentials +=== Configuring a {gcp-full} cluster to use short-term credentials -To install a cluster that is configured to use GCP Workload Identity, you must configure the CCO utility and create the required GCP resources for your cluster. +To install a cluster that is configured to use {gcp-full} Workload Identity, you must configure the CCO utility and create the required {gcp-full} resources for your cluster. //Task part 1: Configuring the Cloud Credential Operator utility include::modules/cco-ccoctl-configuring.adoc[leveloffset=+3] -//Task part 2: Creating the required GCP resources +//Task part 2: Creating the required {gcp-full} resources include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3] //Task part 3: Incorporating the Cloud Credential Operator utility manifests diff --git a/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc b/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc index 350807af03b3..8b096f1905b9 100644 --- a/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc +++ b/installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc @@ -1,16 +1,16 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-restricted-networks-gcp-installer-provisioned"] -= Installing a cluster on GCP in a disconnected environment include::_attributes/common-attributes.adoc[] +[id="installing-restricted-networks-gcp-installer-provisioned"] += Installing a cluster on {gcp-full} in a disconnected environment :context: 
installing-restricted-networks-gcp-installer-provisioned toc::[] -In {product-title} {product-version}, you can install a cluster on Google Cloud Platform (GCP) in a restricted network by creating an internal mirror of the installation release content on an existing Google Virtual Private Cloud (VPC). +In {product-title} {product-version}, you can install a cluster on {gcp-full} in a restricted network by creating an internal mirror of the installation release content on an existing Google Virtual Private Cloud (VPC). [IMPORTANT] ==== -You can install an {product-title} cluster by using mirrored installation release content, but your cluster will require internet access to use the GCP APIs. +You can install an {product-title} cluster by using mirrored installation release content, but your cluster will require internet access to use the {gcp-full} APIs. ==== [id="prerequisites_installing-restricted-networks-gcp-installer-provisioned"] @@ -18,14 +18,14 @@ You can install an {product-title} cluster by using mirrored installation releas * You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. * You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a GCP project] to host the cluster. +* You xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[configured a {gcp-full} project] to host the cluster. * You xref:../../disconnected/installing-mirroring-installation-images.adoc#installation-about-mirror-registry_installing-mirroring-installation-images[mirrored the images for a disconnected installation] to your registry and obtained the `imageContentSources` data for your version of {product-title}. 
+ [IMPORTANT] ==== Because the installation media is on the mirror host, you can use that computer to complete all installation steps. ==== -* You have an existing VPC in GCP. While installing a cluster in a restricted network that uses installer-provisioned infrastructure, you cannot use the installer-provisioned VPC. You must use a user-provisioned VPC that satisfies one of the following requirements: +* You have an existing VPC in {gcp-full}. While installing a cluster in a restricted network that uses installer-provisioned infrastructure, you cannot use the installer-provisioned VPC. You must use a user-provisioned VPC that satisfies one of the following requirements: ** Contains the mirror registry ** Has firewall rules or a peering connection to access the mirror registry hosted elsewhere * If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. While you might need to grant access to more sites, you must grant access to `*.googleapis.com` and `accounts.google.com`. @@ -40,7 +40,7 @@ include::modules/installation-initializing.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] +* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for {gcp-full}] include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] @@ -85,21 +85,21 @@ By default, administrator secrets are stored in the `kube-system` project. 
If yo * To manage long-term cloud credentials manually, follow the procedure in xref:../../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#manually-create-iam_installing-restricted-networks-gcp-installer-provisioned[Manually creating long-term credentials]. -* To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in xref:../../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-gcp-with-short-term-creds_installing-restricted-networks-gcp-installer-provisioned[Configuring a GCP cluster to use short-term credentials]. +* To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in xref:../../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-gcp-with-short-term-creds_installing-restricted-networks-gcp-installer-provisioned[Configuring a {gcp-full} cluster to use short-term credentials]. //Manually creating long-term credentials include::modules/manually-create-identity-access-management.adoc[leveloffset=+2] -//Supertask: Configuring a GCP cluster to use short-term credentials +//Supertask: Configuring a {gcp-full} cluster to use short-term credentials [id="installing-gcp-with-short-term-creds_{context}"] -=== Configuring a GCP cluster to use short-term credentials +=== Configuring a {gcp-full} cluster to use short-term credentials -To install a cluster that is configured to use GCP Workload Identity, you must configure the CCO utility and create the required GCP resources for your cluster. +To install a cluster that is configured to use {gcp-full} Workload Identity, you must configure the CCO utility and create the required {gcp-full} resources for your cluster. 
//Task part 1: Configuring the Cloud Credential Operator utility include::modules/cco-ccoctl-configuring.adoc[leveloffset=+3] -//Task part 2: Creating the required GCP resources +//Task part 2: Creating the required {gcp-full} resources include::modules/cco-ccoctl-creating-at-once.adoc[leveloffset=+3] //Task part 3: Incorporating the Cloud Credential Operator utility manifests diff --git a/installing/installing_gcp/installing-restricted-networks-gcp.adoc b/installing/installing_gcp/installing-restricted-networks-gcp.adoc index 5647535ac120..95e024e6d028 100644 --- a/installing/installing_gcp/installing-restricted-networks-gcp.adoc +++ b/installing/installing_gcp/installing-restricted-networks-gcp.adoc @@ -1,16 +1,16 @@ :_mod-docs-content-type: ASSEMBLY -[id="installing-restricted-networks-gcp"] -= Installing a cluster on GCP in a disconnected environment with user-provisioned infrastructure include::_attributes/common-attributes.adoc[] +[id="installing-restricted-networks-gcp"] += Installing a cluster on {gcp-full} in a disconnected environment with user-provisioned infrastructure :context: installing-restricted-networks-gcp toc::[] -In {product-title} version {product-version}, you can install a cluster on Google Cloud Platform (GCP) that uses infrastructure that you provide and an internal mirror of the installation release content. +In {product-title} version {product-version}, you can install a cluster on {gcp-full} that uses infrastructure that you provide and an internal mirror of the installation release content. [IMPORTANT] ==== -While you can install an {product-title} cluster by using mirrored installation release content, your cluster still requires internet access to use the GCP APIs. +While you can install an {product-title} cluster by using mirrored installation release content, your cluster still requires internet access to use the {gcp-full} APIs. ==== The steps for performing a user-provided infrastructure install are outlined here. 
Several link:https://cloud.google.com/deployment-manager/docs[Deployment Manager] templates are provided to assist in completing these steps or to help model your own. You are also free to create the required resources through other methods. @@ -39,9 +39,9 @@ include::modules/installation-about-restricted-network.adoc[leveloffset=+1] include::modules/cluster-entitlements.adoc[leveloffset=+1] [id="installation-restricted-networks-gcp-user-infra-config-project"] -== Configuring your GCP project +== Configuring your {gcp-full} project -Before you can install {product-title}, you must configure a Google Cloud Platform (GCP) project to host it. +Before you can install {product-title}, you must configure a {gcp-full} project to host it. include::modules/installation-gcp-project.adoc[leveloffset=+2] include::modules/installation-gcp-enabling-api-services.adoc[leveloffset=+2] @@ -78,7 +78,7 @@ include::modules/installation-initializing.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for GCP] +* xref:../../installing/installing_gcp/installation-config-parameters-gcp.adoc#installation-config-parameters-gcp[Installation configuration parameters for {gcp-full}] include::modules/installation-gcp-enabling-shielded-vms.adoc[leveloffset=+2] include::modules/installation-gcp-enabling-confidential-vms.adoc[leveloffset=+2] diff --git a/installing/installing_gcp/preparing-to-install-on-gcp.adoc b/installing/installing_gcp/preparing-to-install-on-gcp.adoc index 8701d0740d88..dbfbe8f0003c 100644 --- a/installing/installing_gcp/preparing-to-install-on-gcp.adoc +++ b/installing/installing_gcp/preparing-to-install-on-gcp.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY -[id="preparing-to-install-on-gcp"] -= Preparing to install on GCP include::_attributes/common-attributes.adoc[] 
+[id="preparing-to-install-on-gcp"] += Preparing to install on {gcp-full} :context: preparing-to-install-on-gcp toc::[] @@ -14,14 +14,14 @@ toc::[] * You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. [id="requirements-for-installing-ocp-on-gcp"] -== Requirements for installing {product-title} on GCP +== Requirements for installing {product-title} on {gcp-full} -Before installing {product-title} on Google Cloud Platform (GCP), you must create a service account and configure a GCP project. See xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[Configuring a GCP project] for details about creating a project, enabling API services, configuring DNS, GCP account limits, and supported GCP regions. +Before installing {product-title} on {gcp-full}, you must create a service account and configure a {gcp-full} project. See xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[Configuring a {gcp-full} project] for details about creating a project, enabling API services, configuring DNS, {gcp-full} account limits, and supported {gcp-full} regions. -If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP] for other options. 
+If the cloud identity and access management (IAM) APIs are not accessible in your environment, or if you do not want to store an administrator-level credential secret in the `kube-system` namespace, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for {gcp-full}] for other options. [id="choosing-an-method-to-install-ocp-on-gcp"] -== Choosing a method to install {product-title} on GCP +== Choosing a method to install {product-title} on {gcp-full} You can install {product-title} on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install {product-title} on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. @@ -30,30 +30,30 @@ See xref:../../architecture/architecture-installation.adoc#installation-process_ [id="choosing-an-method-to-install-ocp-on-gcp-installer-provisioned"] === Installing a cluster on installer-provisioned infrastructure -You can install a cluster on GCP infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: +You can install a cluster on {gcp-full} infrastructure that is provisioned by the {product-title} installation program, by using one of the following methods: -* **xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[Installing a cluster quickly on GCP]**: You can install {product-title} on GCP infrastructure that is provisioned by the {product-title} installation program. You can install a cluster quickly by using the default configuration options. 
+* **xref:../../installing/installing_gcp/installing-gcp-default.adoc#installing-gcp-default[Installing a cluster quickly on {gcp-full}]**: You can install {product-title} on {gcp-full} infrastructure that is provisioned by the {product-title} installation program. You can install a cluster quickly by using the default configuration options. -* **xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a customized cluster on GCP]**: You can install a customized cluster on GCP infrastructure that the installation program provisions. You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. +* **xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a customized cluster on {gcp-full}]**: You can install a customized cluster on {gcp-full} infrastructure that the installation program provisions. You can customize your {product-title} network configuration during installation, so that your cluster can coexist with your existing IP address allocations and adhere to your network requirements. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-cluster-tasks[post-installation]. 
-* **xref:../../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[Installing a cluster on GCP in a restricted network]**: You can install {product-title} on GCP on installer-provisioned infrastructure by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. While you can install {product-title} by using the mirrored content, your cluster still requires internet access to use the GCP APIs. +* **xref:../../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[Installing a cluster on {gcp-full} in a disconnected environment]**: You can install {product-title} on {gcp-full} on installer-provisioned infrastructure by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. While you can install {product-title} by using the mirrored content, your cluster still requires internet access to use the {gcp-full} APIs. -* **xref:../../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[Installing a cluster into an existing Virtual Private Cloud]**: You can install {product-title} on an existing GCP Virtual Private Cloud (VPC). You can use this installation method if you have constraints set by the guidelines of your company, such as limits on creating new accounts or infrastructure. +* **xref:../../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[Installing a cluster into an existing Virtual Private Cloud]**: You can install {product-title} on an existing {gcp-full} Virtual Private Cloud (VPC). 
You can use this installation method if you have constraints set by the guidelines of your company, such as limits on creating new accounts or infrastructure. -* **xref:../../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[Installing a private cluster on an existing VPC]**: You can install a private cluster on an existing GCP VPC. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. +* **xref:../../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[Installing a private cluster on an existing VPC]**: You can install a private cluster on an existing {gcp-full} VPC. You can use this method to deploy {product-title} on an internal network that is not visible to the internet. [id="choosing-an-method-to-install-ocp-on-gcp-user-provisioned"] === Installing a cluster on user-provisioned infrastructure -You can install a cluster on GCP infrastructure that you provision, by using one of the following methods: +You can install a cluster on {gcp-full} infrastructure that you provision, by using one of the following methods: -* **xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[Installing a cluster on GCP with user-provisioned infrastructure]**: You can install {product-title} on GCP infrastructure that you provide. You can use the provided Deployment Manager templates to assist with the installation. +* **xref:../../installing/installing_gcp/installing-gcp-user-infra.adoc#installing-gcp-user-infra[Installing a cluster on {gcp-full} with user-provisioned infrastructure]**: You can install {product-title} on {gcp-full} infrastructure that you provide. You can use the provided Deployment Manager templates to assist with the installation. 
-* **xref:../../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[Installing a cluster with shared VPC on user-provisioned infrastructure in GCP]**: You can use the provided Deployment Manager templates to create GCP resources in a shared VPC infrastructure. +* **xref:../../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[Installing a cluster with shared VPC on user-provisioned infrastructure in {gcp-full}]**: You can use the provided Deployment Manager templates to create {gcp-full} resources in a shared VPC infrastructure. -* **xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[Installing a cluster on GCP in a restricted network with user-provisioned infrastructure]**: You can install {product-title} on GCP in a restricted network with user-provisioned infrastructure. By creating an internal mirror of the installation release content, you can install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. +* **xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[Installing a cluster on {gcp-full} in a disconnected environment with user-provisioned infrastructure]**: You can install {product-title} on {gcp-full} in a restricted network with user-provisioned infrastructure. By creating an internal mirror of the installation release content, you can install a cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content. 
[id="preparing-to-install-on-gcp-next-steps"] == Next steps -* xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[Configuring a GCP project] +* xref:../../installing/installing_gcp/installing-gcp-account.adoc#installing-gcp-account[Configuring a {gcp-full} project] diff --git a/installing/installing_gcp/uninstalling-cluster-gcp.adoc b/installing/installing_gcp/uninstalling-cluster-gcp.adoc index 834a705cf3ac..48d8d8ab9c24 100644 --- a/installing/installing_gcp/uninstalling-cluster-gcp.adoc +++ b/installing/installing_gcp/uninstalling-cluster-gcp.adoc @@ -1,12 +1,12 @@ :_mod-docs-content-type: ASSEMBLY -[id="uninstalling-cluster-gcp"] -= Uninstalling a cluster on GCP include::_attributes/common-attributes.adoc[] +[id="uninstalling-cluster-gcp"] += Uninstalling a cluster on {gcp-full} :context: uninstalling-cluster-gcp toc::[] -You can remove a cluster that you deployed to Google Cloud Platform (GCP). +You can remove a cluster that you deployed to {gcp-full}. 
include::modules/installation-uninstall-clouds.adoc[leveloffset=+1] diff --git a/installing/installing_sno/install-sno-installing-sno.adoc b/installing/installing_sno/install-sno-installing-sno.adoc index f2ef057aae27..8674f9f2c98e 100644 --- a/installing/installing_sno/install-sno-installing-sno.adoc +++ b/installing/installing_sno/install-sno-installing-sno.adoc @@ -131,7 +131,7 @@ include::modules/install-sno-installing-sno-on-gcp.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on GCP with customizations] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on {gcp-full} with customizations] include::modules/install-sno-installing-with-usb-media.adoc[leveloffset=+1] diff --git a/installing/overview/cluster-capabilities.adoc b/installing/overview/cluster-capabilities.adoc index 56ab3998f18c..e232fd7b3a13 100644 --- a/installing/overview/cluster-capabilities.adoc +++ b/installing/overview/cluster-capabilities.adoc @@ -19,7 +19,7 @@ include::snippets/capabilities-table.adoc[] [role="_additional-resources"] .Additional resources * xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#installing-aws-customizations[Installing a cluster on AWS with customizations] -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on GCP with customizations] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on {gcp-full} with customizations] include::modules/explanation-of-capabilities.adoc[leveloffset=+1] diff --git a/installing/overview/installing-fips.adoc b/installing/overview/installing-fips.adoc index da8e63baba8d..e5b4b3c2ecf6 100644 --- a/installing/overview/installing-fips.adoc +++ 
b/installing/overview/installing-fips.adoc @@ -96,7 +96,7 @@ To enable FIPS mode for your cluster, you must run the installation program from * xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#installing-aws-customizations[Amazon Web Services] * xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#installing-azure-customizations[Microsoft Azure] * xref:../../installing/installing_bare_metal/upi/installing-bare-metal.adoc#installing-bare-metal[Bare metal] -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Google Cloud Platform] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[{gcp-full}] * xref:../../installing/installing_ibm_cloud/installing-ibm-cloud-customizations.adoc#installing-ibm-cloud-customizations[{ibm-cloud-name}] * xref:../../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[{ibm-power-name}] * xref:../../installing/installing_ibm_z/upi/installing-ibm-z.adoc#installing-ibm-z[{ibm-z-name} and {ibm-linuxone-name}] diff --git a/installing/overview/installing-preparing.adoc b/installing/overview/installing-preparing.adoc index 2cb524d06229..f8c750629e58 100644 --- a/installing/overview/installing-preparing.adoc +++ b/installing/overview/installing-preparing.adoc @@ -24,8 +24,8 @@ endif::openshift-origin[] * Microsoft Azure on 64-bit x86 instances * Microsoft Azure on 64-bit ARM instances * Microsoft Azure Stack Hub -* Google Cloud Platform (GCP) on 64-bit x86 instances -* Google Cloud Platform (GCP) on 64-bit ARM instances +* {gcp-full} on 64-bit x86 instances +* {gcp-full} on 64-bit ARM instances * {rh-openstack-first} * {ibm-cloud-name} * {ibm-z-name} or {ibm-linuxone-name} with z/VM @@ -58,13 +58,13 @@ You can deploy an installer-provisioned infrastructure cluster without specifyin If you need to perform basic configuration for your installer-provisioned 
infrastructure cluster, such as the instance type for the cluster machines, you can customize an installation for xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#installing-aws-customizations[AWS], xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#installing-azure-customizations[Azure], xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[GCP], xref:../../installing/installing_nutanix/installing-nutanix-installer-provisioned.adoc#installing-nutanix-installer-provisioned[Nutanix]. -For installer-provisioned infrastructure installations, you can use an existing xref:../../installing/installing_aws/ipi/installing-aws-vpc.adoc#installing-aws-vpc[VPC in AWS], xref:../../installing/installing_azure/ipi/installing-azure-vnet.adoc#installing-azure-vnet[vNet in Azure], or xref:../../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[VPC in GCP]. You can also reuse part of your networking infrastructure so that your cluster in xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#installing-aws-customizations[AWS], xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#installing-azure-customizations[Azure], xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[GCP] can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. If you have existing accounts and credentials on these clouds, you can re-use them, but you might need to modify the accounts to have the required permissions to install {product-title} clusters on them. 
+For installer-provisioned infrastructure installations, you can use an existing xref:../../installing/installing_aws/ipi/installing-aws-vpc.adoc#installing-aws-vpc[VPC in AWS], xref:../../installing/installing_azure/ipi/installing-azure-vnet.adoc#installing-azure-vnet[vNet in Azure], or xref:../../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[VPC in {gcp-full}]. You can also reuse part of your networking infrastructure so that your cluster in xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#installing-aws-customizations[AWS], xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#installing-azure-customizations[Azure], xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[{gcp-full}] can coexist with existing IP address allocations in your environment and integrate with existing MTU and VXLAN configurations. If you have existing accounts and credentials on these clouds, you can re-use them, but you might need to modify the accounts to have the required permissions to install {product-title} clusters on them. You can use the installer-provisioned infrastructure method to create appropriate machine instances on your hardware for xref:../../installing/installing_vsphere/ipi/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[vSphere], and xref:../../installing/installing_bare_metal/ipi/ipi-install-overview.adoc#ipi-install-overview[bare metal]. Additionally, for xref:../../installing/installing_vsphere/ipi/installing-vsphere-installer-provisioned-customizations.adoc#installing-vsphere-installer-provisioned-customizations[vSphere], you can also customize additional network parameters during installation. 
For some installer-provisioned infrastructure installations, for example on the {vmw-first} and bare metal platforms, the external traffic that reaches the ingress virtual IP (VIP) is not balanced between the default `IngressController` replicas. For {vmw-short} and bare metal installer-provisioned infrastructure installations where exceeding the baseline `IngressController` router performance is expected, you must configure an external load balancer. Configuring an external load balancer achieves the performance of multiple `IngressController` replicas. For more information about the baseline `IngressController` performance, see xref:../../scalability_and_performance/optimization/routing-optimization.adoc#baseline-router-performance_routing-optimization[Baseline Ingress Controller (router) performance]. For more information about configuring an external load balancer, see xref:../../installing/installing_bare_metal/ipi/ipi-install-installation-workflow.adoc#nw-osp-configuring-external-load-balancer_ipi-install-installation-workflow[Configuring a user-managed load balancer]. -If you want to reuse extensive cloud infrastructure, you can complete a _user-provisioned infrastructure_ installation. With these installations, you manually deploy the machines that your cluster requires during the installation process. If you perform a user-provisioned infrastructure installation on xref:../../installing/installing_aws/upi/installing-aws-user-infra.adoc#installing-aws-user-infra[AWS], xref:../../installing/installing_azure/upi/installing-azure-user-infra.adoc#installing-azure-user-infra[Azure], xref:../../installing/installing_azure_stack_hub/upi/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Azure Stack Hub], you can use the provided templates to help you stand up all of the required components. You can also reuse a shared xref:../../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[VPC on GCP]. 
Otherwise, you can use the xref:../../installing/installing_platform_agnostic/installing-platform-agnostic.adoc#installing-platform-agnostic[provider-agnostic] installation method to deploy a cluster into other clouds. +If you want to reuse extensive cloud infrastructure, you can complete a _user-provisioned infrastructure_ installation. With these installations, you manually deploy the machines that your cluster requires during the installation process. If you perform a user-provisioned infrastructure installation on xref:../../installing/installing_aws/upi/installing-aws-user-infra.adoc#installing-aws-user-infra[AWS], xref:../../installing/installing_azure/upi/installing-azure-user-infra.adoc#installing-azure-user-infra[Azure], xref:../../installing/installing_azure_stack_hub/upi/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Azure Stack Hub], you can use the provided templates to help you stand up all of the required components. You can also reuse a shared xref:../../installing/installing_gcp/installing-gcp-user-infra-vpc.adoc#installing-gcp-user-infra-vpc[VPC on {gcp-full}]. Otherwise, you can use the xref:../../installing/installing_platform_agnostic/installing-platform-agnostic.adoc#installing-platform-agnostic[provider-agnostic] installation method to deploy a cluster into other clouds. You can also complete a user-provisioned infrastructure installation on your existing hardware. 
If you use xref:../../installing/installing_openstack/installing-openstack-user.adoc#installing-openstack-user[{rh-openstack}], xref:../../installing/installing_ibm_z/upi/installing-ibm-z.adoc#installing-ibm-z[{ibm-z-name} or {ibm-linuxone-name}], xref:../../installing/installing_ibm_z/upi/installing-ibm-z-kvm.adoc#installing-ibm-z-kvm[{ibm-z-name} and {ibm-linuxone-name} with {op-system-base} KVM], xref:../../installing/installing_ibm_z/upi/installing-ibm-z-lpar.adoc#installing-ibm-z-lpar[{ibm-z-name} and {ibm-linuxone-name} in an LPAR], xref:../../installing/installing_ibm_power/installing-ibm-power.adoc#installing-ibm-power[{ibm-power-title}], or xref:../../installing/installing_vsphere/upi/installing-vsphere.adoc#installing-vsphere[vSphere], use the specific installation instructions to deploy your cluster. If you use other supported hardware, follow the xref:../../installing/installing_bare_metal/upi/installing-bare-metal.adoc#installing-bare-metal[bare metal installation] procedure. For some of these platforms, such as xref:../../installing/installing_vsphere/upi/installing-vsphere.adoc#installing-vsphere[vSphere] and xref:../../installing/installing_bare_metal/upi/installing-bare-metal-network-customizations.adoc#installing-bare-metal-network-customizations[bare metal], you can also customize additional network parameters during installation. @@ -77,7 +77,7 @@ If you use a user-provisioned installation method, you can configure a proxy for If you want to prevent your cluster on a public cloud from exposing endpoints externally, you can deploy a private cluster with installer-provisioned infrastructure on xref:../../installing/installing_aws/ipi/installing-aws-private.adoc#installing-aws-private[AWS], xref:../../installing/installing_azure/ipi/installing-azure-private.adoc#installing-azure-private[Azure], or xref:../../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[GCP]. 
-If you need to install your cluster that has limited access to the internet, such as a disconnected or restricted network cluster, you can xref:../../disconnected/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[mirror the installation packages] and install the cluster from them. Follow detailed instructions for user-provisioned infrastructure installations into restricted networks for xref:../../installing/installing_aws/upi/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[AWS], xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[GCP], xref:../../installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[{ibm-z-name} or {ibm-linuxone-name}], xref:../../installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[{ibm-z-name} or {ibm-linuxone-name} with {op-system-base} KVM], xref:../../installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-lpar.adoc#installing-restricted-networks-ibm-z-lpar[{ibm-z-name} or {ibm-linuxone-name} in an LPAR], xref:../../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[{ibm-power-name}], xref:../../installing/installing_vsphere/upi/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[vSphere], or xref:../../installing/installing_bare_metal/upi/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal]. 
You can also install a cluster into a restricted network using installer-provisioned infrastructure by following detailed instructions for xref:../../installing/installing_aws/ipi/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[AWS], xref:../../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[GCP], xref:../../installing/installing_ibm_cloud/installing-ibm-cloud-restricted.adoc#installing-ibm-cloud-restricted[{ibm-cloud-name}], xref:../../installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc#installing-restricted-networks-nutanix-installer-provisioned[Nutanix], xref:../../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[{rh-openstack}], and xref:../../installing/installing_vsphere/ipi/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[vSphere]. +If you need to install your cluster that has limited access to the internet, such as a disconnected or restricted network cluster, you can xref:../../disconnected/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[mirror the installation packages] and install the cluster from them. 
Follow detailed instructions for user-provisioned infrastructure installations into restricted networks for xref:../../installing/installing_aws/upi/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[AWS], xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[{gcp-full}], xref:../../installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[{ibm-z-name} or {ibm-linuxone-name}], xref:../../installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[{ibm-z-name} or {ibm-linuxone-name} with {op-system-base} KVM], xref:../../installing/installing_ibm_z/upi/installing-restricted-networks-ibm-z-lpar.adoc#installing-restricted-networks-ibm-z-lpar[{ibm-z-name} or {ibm-linuxone-name} in an LPAR], xref:../../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[{ibm-power-name}], xref:../../installing/installing_vsphere/upi/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[vSphere], or xref:../../installing/installing_bare_metal/upi/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal]. 
You can also install a cluster into a restricted network using installer-provisioned infrastructure by following detailed instructions for xref:../../installing/installing_aws/ipi/installing-restricted-networks-aws-installer-provisioned.adoc#installing-restricted-networks-aws-installer-provisioned[AWS], xref:../../installing/installing_gcp/installing-restricted-networks-gcp-installer-provisioned.adoc#installing-restricted-networks-gcp-installer-provisioned[{gcp-full}], xref:../../installing/installing_ibm_cloud/installing-ibm-cloud-restricted.adoc#installing-ibm-cloud-restricted[{ibm-cloud-name}], xref:../../installing/installing_nutanix/installing-restricted-networks-nutanix-installer-provisioned.adoc#installing-restricted-networks-nutanix-installer-provisioned[Nutanix], xref:../../installing/installing_openstack/installing-openstack-installer-restricted.adoc#installing-openstack-installer-restricted[{rh-openstack}], and xref:../../installing/installing_vsphere/ipi/installing-restricted-networks-installer-provisioned-vsphere.adoc#installing-restricted-networks-installer-provisioned-vsphere[vSphere]. If you need to deploy your cluster to an xref:../../installing/installing_aws/ipi/installing-aws-specialized-region.adoc#installing-aws-specialized-region[AWS GovCloud region], xref:../../installing/installing_aws/ipi/installing-aws-specialized-region.adoc#installing-aws-specialized-region[AWS China region], or xref:../../installing/installing_azure/ipi/installing-azure-government-region.adoc#installing-azure-government-region[Azure government region], you can configure those custom regions during an installer-provisioned infrastructure installation. @@ -132,7 +132,7 @@ Not all installation options are supported for all platforms, as shown in the fo //This table is for all flavors of OpenShift, except OKD. A separate table is required because OKD does not support multiple AWS architecture types.
Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. ifndef::openshift-origin[] |=== -||AWS (64-bit x86) |AWS (64-bit ARM) |Azure (64-bit x86) |Azure (64-bit ARM)|Azure Stack Hub |GCP (64-bit x86) |GCP (64-bit ARM) |Nutanix |{rh-openstack} |Bare metal (64-bit x86) |Bare metal (64-bit ARM) |vSphere |{ibm-cloud-name} |{ibm-z-name} |{ibm-power-name} |{ibm-power-server-name} +||AWS (64-bit x86) |AWS (64-bit ARM) |Azure (64-bit x86) |Azure (64-bit ARM)|Azure Stack Hub |{gcp-full} (64-bit x86) |{gcp-full} (64-bit ARM) |Nutanix |{rh-openstack} |Bare metal (64-bit x86) |Bare metal (64-bit ARM) |vSphere |{ibm-cloud-name} |{ibm-z-name} |{ibm-power-name} |{ibm-power-server-name} |Default |xref:../../installing/installing_aws/ipi/installing-aws-default.adoc#installing-aws-default[✓] @@ -303,7 +303,7 @@ endif::openshift-origin[] //This table is for OKD only. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. ifdef::openshift-origin[] |=== -||AWS |Azure |Azure Stack Hub |GCP |Nutanix |{rh-openstack} |Bare metal |vSphere |{ibm-cloud-name} |{ibm-z-name} |{ibm-power-name} +||AWS |Azure |Azure Stack Hub |{gcp-full} |Nutanix |{rh-openstack} |Bare metal |vSphere |{ibm-cloud-name} |{ibm-z-name} |{ibm-power-name} |Default @@ -429,7 +429,7 @@ endif::openshift-origin[] //This table is for all flavors of OpenShift, except OKD. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. 
ifndef::openshift-origin[] |=== -||AWS (64-bit x86) |AWS (64-bit ARM) |Azure (64-bit x86) |Azure (64-bit ARM) |Azure Stack Hub |GCP (64-bit x86) |GCP (64-bit ARM) |Nutanix |{rh-openstack} |Bare metal (64-bit x86) |Bare metal (64-bit ARM) |vSphere |{ibm-cloud-name} |{ibm-z-name} |{ibm-z-name} with {op-system-base} KVM |{ibm-power-name} |Platform agnostic +||AWS (64-bit x86) |AWS (64-bit ARM) |Azure (64-bit x86) |Azure (64-bit ARM) |Azure Stack Hub |{gcp-full} (64-bit x86) |{gcp-full} (64-bit ARM) |Nutanix |{rh-openstack} |Bare metal (64-bit x86) |Bare metal (64-bit ARM) |vSphere |{ibm-cloud-name} |{ibm-z-name} |{ibm-z-name} with {op-system-base} KVM |{ibm-power-name} |Platform agnostic |Custom @@ -515,7 +515,7 @@ endif::openshift-origin[] //This table is for OKD only. A separate table is required because OKD does not support multiple AWS architecture types. Trying to maintain one table using conditions, while convenient, is very fragile and prone to publishing errors. ifdef::openshift-origin[] |=== -||AWS |Azure |Azure Stack Hub |GCP |Nutanix |{rh-openstack}|Bare metal |vSphere |{ibm-cloud-name} |{ibm-z-name} |{ibm-z-name} with {op-system-base} KVM |{ibm-power-name} |Platform agnostic +||AWS |Azure |Azure Stack Hub |{gcp-full} |Nutanix |{rh-openstack}|Bare metal |vSphere |{ibm-cloud-name} |{ibm-z-name} |{ibm-z-name} with {op-system-base} KVM |{ibm-power-name} |Platform agnostic |Custom diff --git a/machine_management/cluster_api_machine_management/cluster_api_provider_configurations/cluster-api-config-options-gcp.adoc b/machine_management/cluster_api_machine_management/cluster_api_provider_configurations/cluster-api-config-options-gcp.adoc index 28ed4ab66151..456c23b83de9 100644 --- a/machine_management/cluster_api_machine_management/cluster_api_provider_configurations/cluster-api-config-options-gcp.adoc +++ b/machine_management/cluster_api_machine_management/cluster_api_provider_configurations/cluster-api-config-options-gcp.adoc @@ -1,6 +1,6 @@ 
:_mod-docs-content-type: ASSEMBLY [id="cluster-api-config-options-gcp"] -= Cluster API configuration options for Google Cloud Platform += Cluster API configuration options for {gcp-full} include::_attributes/common-attributes.adoc[] :context: cluster-api-config-options-gcp @@ -16,10 +16,10 @@ include::snippets/technology-preview.adoc[] The following example YAML files show configurations for a {gcp-full} cluster. -//Sample YAML for CAPI GCP machine template resource +//Sample YAML for CAPI {gcp-full} machine template resource include::modules/capi-yaml-machine-template-gcp.adoc[leveloffset=+2] -//Sample YAML for a CAPI GCP compute machine set resource +//Sample YAML for a CAPI {gcp-full} compute machine set resource include::modules/capi-yaml-machine-set-gcp.adoc[leveloffset=+2] // [id="cluster-api-supported-features-gcp_{context}"] diff --git a/machine_management/control_plane_machine_management/cpmso-getting-started.adoc b/machine_management/control_plane_machine_management/cpmso-getting-started.adoc index 2cb2ada5af2f..9e5a5ff289f9 100644 --- a/machine_management/control_plane_machine_management/cpmso-getting-started.adoc +++ b/machine_management/control_plane_machine_management/cpmso-getting-started.adoc @@ -19,7 +19,7 @@ If you are uncertain about the state of the `ControlPlaneMachineSet` CR in your [id="cpmso-platform-matrix_{context}"] == Supported cloud providers -In {product-title} {product-version}, the control plane machine set is supported for Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, Nutanix, and VMware vSphere clusters. +In {product-title} {product-version}, the control plane machine set is supported for Amazon Web Services (AWS), {gcp-full}, Microsoft Azure, Nutanix, and VMware vSphere clusters. The status of the control plane machine set after installation depends on your cloud provider and the version of {product-title} that you installed on your cluster. 
diff --git a/machine_management/control_plane_machine_management/cpmso_provider_configurations/cpmso-config-options-gcp.adoc b/machine_management/control_plane_machine_management/cpmso_provider_configurations/cpmso-config-options-gcp.adoc index 8b61d2d5601e..d3c2076bb054 100644 --- a/machine_management/control_plane_machine_management/cpmso_provider_configurations/cpmso-config-options-gcp.adoc +++ b/machine_management/control_plane_machine_management/cpmso_provider_configurations/cpmso-config-options-gcp.adoc @@ -1,6 +1,6 @@ :_mod-docs-content-type: ASSEMBLY [id="cpmso-config-options-gcp"] -= Control plane configuration options for Google Cloud Platform += Control plane configuration options for {gcp-full} include::_attributes/common-attributes.adoc[] :context: cpmso-config-options-gcp @@ -14,10 +14,10 @@ When you save an update to the control plane machine set, the Control Plane Mach The following example YAML snippets show provider specification and failure domain configurations for a {gcp-short} cluster. -//Sample GCP provider specification +//Sample {gcp-full} provider specification include::modules/cpmso-yaml-provider-spec-gcp.adoc[leveloffset=+2] -//Sample GCP failure domain configuration +//Sample {gcp-full} failure domain configuration include::modules/cpmso-yaml-failure-domain-gcp.adoc[leveloffset=+2] [id="cpmso-supported-features-gcp_{context}"] @@ -25,7 +25,7 @@ include::modules/cpmso-yaml-failure-domain-gcp.adoc[leveloffset=+2] You can enable features by updating values in the control plane machine set. -//Note: GCP GPU features should be compatible with CPMS, but dev cannot think of a use case. Leaving them out to keep things less cluttered. If a customer use case emerges, we can just add the necessary modules in here. +//Note: {gcp-full} GPU features should be compatible with CPMS, but dev cannot think of a use case. Leaving them out to keep things less cluttered. If a customer use case emerges, we can just add the necessary modules in here. 
//Configuring persistent disk types by using machine sets include::modules/machineset-gcp-pd-disk-types.adoc[leveloffset=+2] diff --git a/machine_management/creating-infrastructure-machinesets.adoc b/machine_management/creating-infrastructure-machinesets.adoc index 6cc3fdf5423c..4e4d8fbac654 100644 --- a/machine_management/creating-infrastructure-machinesets.adoc +++ b/machine_management/creating-infrastructure-machinesets.adoc @@ -52,8 +52,8 @@ include::modules/machineset-yaml-ibm-cloud.adoc[leveloffset=+3] include::modules/machineset-yaml-gcp.adoc[leveloffset=+3] -Machine sets running on GCP support non-guaranteed xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-non-guaranteed-instance_creating-machineset-gcp[preemptible VM instances]. You can save on costs by using preemptible VM instances at a lower price -compared to normal instances on GCP. You can xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-creating-non-guaranteed-instance_creating-machineset-gcp[configure preemptible VM instances] by adding `preemptible` to the `MachineSet` YAML file. +Machine sets running on {gcp-full} support non-guaranteed xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-non-guaranteed-instance_creating-machineset-gcp[preemptible VM instances]. You can save on costs by using preemptible VM instances at a lower price +compared to normal instances on {gcp-full}. You can xref:../machine_management/creating_machinesets/creating-machineset-gcp.adoc#machineset-creating-non-guaranteed-instance_creating-machineset-gcp[configure preemptible VM instances] by adding `preemptible` to the `MachineSet` YAML file. 
include::modules/machineset-yaml-nutanix.adoc[leveloffset=+3] diff --git a/machine_management/creating_machinesets/creating-machineset-gcp.adoc b/machine_management/creating_machinesets/creating-machineset-gcp.adoc index 967d55e415af..673d9cac1403 100644 --- a/machine_management/creating_machinesets/creating-machineset-gcp.adoc +++ b/machine_management/creating_machinesets/creating-machineset-gcp.adoc @@ -1,17 +1,17 @@ :_mod-docs-content-type: ASSEMBLY -[id="creating-machineset-gcp"] -= Creating a compute machine set on GCP include::_attributes/common-attributes.adoc[] +[id="creating-machineset-gcp"] += Creating a compute machine set on {gcp-full} :context: creating-machineset-gcp toc::[] -You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on Google Cloud Platform (GCP). For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. +You can create a different compute machine set to serve a specific purpose in your {product-title} cluster on {gcp-full}. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. 
//[IMPORTANT] admonition for UPI include::snippets/machine-user-provisioned-limitations.adoc[leveloffset=+1] -//Sample YAML for a compute machine set custom resource on GCP +//Sample YAML for a compute machine set custom resource on {gcp-full} include::modules/machineset-yaml-gcp.adoc[leveloffset=+1] //Creating a compute machine set diff --git a/machine_management/index.adoc b/machine_management/index.adoc index fa690ca47554..0fa5cb1c0a00 100644 --- a/machine_management/index.adoc +++ b/machine_management/index.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can use machine management to flexibly work with underlying infrastructure such as Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, and VMware vSphere to manage the {product-title} cluster. +You can use machine management to flexibly work with underlying infrastructure such as Amazon Web Services (AWS), Microsoft Azure, {gcp-full}, {rh-openstack-first}, and VMware vSphere to manage the {product-title} cluster. You can control the cluster and perform auto-scaling, such as scaling up and down the cluster based on specific workload policies. It is important to have a cluster that adapts to changing workloads. The {product-title} cluster can horizontally scale up and down when the load increases or decreases. 
diff --git a/machine_management/user_infra/adding-compute-user-infra-general.adoc b/machine_management/user_infra/adding-compute-user-infra-general.adoc index c5fcc931dbda..ee5c97171b8b 100644 --- a/machine_management/user_infra/adding-compute-user-infra-general.adoc +++ b/machine_management/user_infra/adding-compute-user-infra-general.adoc @@ -24,9 +24,9 @@ To add more compute machines to your {product-title} cluster on Microsoft Azure, To add more compute machines to your {product-title} cluster on Azure Stack Hub, see xref:../../installing/installing_azure_stack_hub/upi/installing-azure-stack-hub-user-infra.adoc#installation-creating-azure-worker_installing-azure-stack-hub-user-infra[Creating additional worker machines in Azure Stack Hub]. [id="upi-adding-compute-gcp"] -== Adding compute machines to Google Cloud Platform +== Adding compute machines to {gcp-full} -To add more compute machines to your {product-title} cluster on Google Cloud Platform (GCP), see xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installation-creating-gcp-worker_installing-restricted-networks-gcp[Creating additional worker machines in GCP]. +To add more compute machines to your {product-title} cluster on {gcp-full}, see xref:../../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installation-creating-gcp-worker_installing-restricted-networks-gcp[Creating additional worker machines in {gcp-full}]. 
[id="upi-adding-compute-vsphere"] == Adding compute machines to vSphere diff --git a/migrating_from_ocp_3_to_4/installing-3-4.adoc b/migrating_from_ocp_3_to_4/installing-3-4.adoc index fba66653a599..0e1f5601ec6a 100644 --- a/migrating_from_ocp_3_to_4/installing-3-4.adoc +++ b/migrating_from_ocp_3_to_4/installing-3-4.adoc @@ -36,7 +36,7 @@ The following storage providers are supported: * xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-mcg_installing-3-4[Multicloud Object Gateway] * xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-aws-s3_installing-3-4[Amazon Web Services S3] -* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-gcp_installing-3-4[Google Cloud Platform] +* xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-gcp_installing-3-4[{gcp-full}] * xref:../migrating_from_ocp_3_to_4/installing-3-4.adoc#migration-configuring-azure_installing-3-4[Microsoft Azure Blob] * Generic S3 object storage, for example, Minio or Ceph S3 diff --git a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc index bbe15b1df02c..2f44190d44af 100644 --- a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc +++ b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc @@ -128,7 +128,7 @@ For more information, see xref:../storage/understanding-persistent-storage.adoc# * Amazon Web Services (AWS) Elastic Block Storage (EBS) * Azure Disk * Azure File -* Google Cloud Platform Persistent Disk (GCP PD) +* {gcp-full} Persistent Disk ({gcp-full} PD) * OpenStack Cinder * VMware vSphere + diff --git a/migration_toolkit_for_containers/installing-mtc.adoc b/migration_toolkit_for_containers/installing-mtc.adoc index f5a1a78b880d..cb9c5f44b3b5 100644 --- a/migration_toolkit_for_containers/installing-mtc.adoc +++ b/migration_toolkit_for_containers/installing-mtc.adoc @@ -70,7 +70,7 @@ You must configure an object storage to use as a 
replication repository. The {mt * xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-mcg_installing-mtc[Multicloud Object Gateway] * xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-aws-s3_installing-mtc[Amazon Web Services S3] -* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-gcp_installing-mtc[Google Cloud Platform] +* xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-gcp_installing-mtc[{gcp-full}] * xref:../migration_toolkit_for_containers/installing-mtc.adoc#migration-configuring-azure_installing-mtc[Microsoft Azure Blob] * Generic S3 object storage, for example, Minio or Ceph S3 diff --git a/modules/about-manually-maintained-credentials-upgrade.adoc b/modules/about-manually-maintained-credentials-upgrade.adoc index 4b6714b789b4..1cefb7af37f0 100644 --- a/modules/about-manually-maintained-credentials-upgrade.adoc +++ b/modules/about-manually-maintained-credentials-upgrade.adoc @@ -51,7 +51,7 @@ Administrators of clusters on these platforms must take the following actions: . Manually update the cloud provider resources for the new release. . Indicate that the cluster is ready to update with the `upgradeable-to` annotation. -Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platform (GCP):: +Amazon Web Services (AWS), global Microsoft Azure, and {gcp-first}:: Clusters installed on these platforms support multiple CCO modes. + The required update process depends on the mode that the cluster is configured to use. If you are not sure what mode the CCO is configured to use on your cluster, you can use the web console or the CLI to determine this information. 
diff --git a/modules/admin-credentials-root-secret-formats.adoc b/modules/admin-credentials-root-secret-formats.adoc index ff2ab4d917eb..32b997ad613d 100644 --- a/modules/admin-credentials-root-secret-formats.adoc +++ b/modules/admin-credentials-root-secret-formats.adoc @@ -79,7 +79,7 @@ azure_resourcegroup: mycluster-2mpcn-rg ---- endif::passthrough[] -.Google Cloud Platform (GCP) secret format +.{gcp-first} secret format [source,yaml] ---- diff --git a/modules/cco-ccoctl-creating-at-once.adoc b/modules/cco-ccoctl-creating-at-once.adoc index 9a5b13191d53..d33213f0e7d4 100644 --- a/modules/cco-ccoctl-creating-at-once.adoc +++ b/modules/cco-ccoctl-creating-at-once.adoc @@ -105,9 +105,9 @@ Otherwise, you can create the AWS resources individually. For more information, endif::aws-sts[] ifdef::google-cloud-platform[] -= Creating GCP resources with the Cloud Credential Operator utility += Creating {gcp-short} resources with the Cloud Credential Operator utility -You can use the `ccoctl gcp create-all` command to automate the creation of GCP resources. +You can use the `ccoctl gcp create-all` command to automate the creation of {gcp-short} resources. endif::google-cloud-platform[] ifdef::azure-workload-id[] = Creating Azure resources with the Cloud Credential Operator utility @@ -203,9 +203,9 @@ $ ccoctl gcp create-all \ --credentials-requests-dir= <4> ---- <1> Specify the user-defined name for all created {gcp-short} resources used for tracking. If you plan to install the {gcp-short} Filestore Container Storage Interface (CSI) Driver Operator, retain this value. -<2> Specify the GCP region in which cloud resources will be created. -<3> Specify the GCP project ID in which cloud resources will be created. -<4> Specify the directory containing the files of `CredentialsRequest` manifests to create GCP service accounts. +<2> Specify the {gcp-short} region in which cloud resources will be created. 
+<3> Specify the {gcp-short} project ID in which cloud resources will be created. +<4> Specify the directory containing the files of `CredentialsRequest` manifests to create {gcp-short} service accounts. + [NOTE] ==== @@ -285,7 +285,7 @@ openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-gcp-cloud-credentials-credentials.yaml ---- + -You can verify that the IAM service accounts are created by querying GCP. For more information, refer to GCP documentation on listing IAM service accounts. +You can verify that the IAM service accounts are created by querying {gcp-short}. For more information, refer to {gcp-short} documentation on listing IAM service accounts. endif::google-cloud-platform[] ifdef::azure-workload-id[] + diff --git a/modules/cco-ccoctl-deleting-sts-resources.adoc b/modules/cco-ccoctl-deleting-sts-resources.adoc index c74bf14f92e0..fe5ca08b0cc6 100644 --- a/modules/cco-ccoctl-deleting-sts-resources.adoc +++ b/modules/cco-ccoctl-deleting-sts-resources.adoc @@ -11,7 +11,7 @@ ifeval::["{context}" == "uninstall-cluster-aws"] :aws-sts: endif::[] ifeval::["{context}" == "uninstalling-cluster-gcp"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :cp-name: gcp :gcp-workload-id: @@ -84,7 +84,7 @@ endif::azure-workload-id[] ifdef::aws-sts,azure-workload-id[<2> `<{cp-name}_region>` is the {cp} region in which to delete cloud resources.] ifdef::gcp-workload-id[] <2> `<{cp-name}_project_id>` is the {cp} project ID in which to delete cloud resources. -<3> Optional: This parameter deletes the custom roles that the `ccoctl` utility creates during installation. GCP does not permanently delete custom roles immediately. For more information, see GCP documentation about link:https://cloud.google.com/iam/docs/creating-custom-roles#deleting-custom-role[deleting a custom role]. +<3> Optional: This parameter deletes the custom roles that the `ccoctl` utility creates during installation.
{gcp-short} does not permanently delete custom roles immediately. For more information, see {gcp-short} documentation about link:https://cloud.google.com/iam/docs/creating-custom-roles#deleting-custom-role[deleting a custom role]. endif::gcp-workload-id[] ifdef::azure-workload-id[<3> `<{cp-name}_subscription_id>` is the {cp} subscription ID for which to delete cloud resources.] ifdef::aws-sts[] @@ -107,7 +107,7 @@ ifdef::aws-sts[] 2021/04/08 17:51:11 IAM Role -openshift-machine-api-aws-cloud-credentials deleted 2021/04/08 17:51:39 Identity Provider with ARN arn:aws:iam:::oidc-provider/-oidc.s3..amazonaws.com deleted ---- -//Would love a GCP and Azure version of the above output. +//Would love a {gcp-short} and Azure version of the above output. endif::aws-sts[] .Verification @@ -120,7 +120,7 @@ ifeval::["{context}" == "uninstall-cluster-aws"] :!aws-sts: endif::[] ifeval::["{context}" == "uninstalling-cluster-gcp"] -:!cp-first: Google Cloud Platform +:!cp-first: Google Cloud :!cp: GCP :!gcp-workload-id: endif::[] diff --git a/modules/cco-ccoctl-install-creating-manifests.adoc b/modules/cco-ccoctl-install-creating-manifests.adoc index 73568dbd6bb5..a26bf981c5b9 100644 --- a/modules/cco-ccoctl-install-creating-manifests.adoc +++ b/modules/cco-ccoctl-install-creating-manifests.adoc @@ -77,9 +77,9 @@ To implement short-term security credentials managed outside the cluster for ind .Procedure ifdef::google-cloud-platform[] -. Add the following granular permissions to the GCP account that the installation program uses: +. 
Add the following granular permissions to the {gcp-short} account that the installation program uses: + -.Required GCP permissions +.Required {gcp-short} permissions [%collapsible] ==== * compute.machineTypes.list diff --git a/modules/cco-ccoctl-upgrading.adoc b/modules/cco-ccoctl-upgrading.adoc index 4bccfbc7f298..b958755adf64 100644 --- a/modules/cco-ccoctl-upgrading.adoc +++ b/modules/cco-ccoctl-upgrading.adoc @@ -56,10 +56,10 @@ $ ccoctl gcp create-all \ --credentials-requests-dir= \// <4> --output-dir= <5> ---- -<1> Specify the user-defined name for all created GCP resources used for tracking. -<2> Specify the GCP region in which cloud resources will be created. -<3> Specify the GCP project ID in which cloud resources will be created. -<4> Specify the directory containing the files of `CredentialsRequest` manifests to create GCP service accounts. +<1> Specify the user-defined name for all created {gcp-short} resources used for tracking. +<2> Specify the {gcp-full} region in which cloud resources will be created. +<3> Specify the {gcp-short} project ID in which cloud resources will be created. +<4> Specify the directory containing the files of `CredentialsRequest` manifests to create {gcp-short} service accounts. <5> Optional: Specify the directory in which you want the `ccoctl` utility to create objects. By default, the utility creates objects in the directory in which the commands are run. ==== + diff --git a/modules/cco-determine-mode-cli.adoc b/modules/cco-determine-mode-cli.adoc index 6d19e27241c2..b640d085ad1e 100644 --- a/modules/cco-determine-mode-cli.adoc +++ b/modules/cco-determine-mode-cli.adoc @@ -19,7 +19,7 @@ You can determine what mode the Cloud Credential Operator (CCO) is configured to [NOTE] ==== -Only Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platform (GCP) clusters support multiple CCO modes. +Only Amazon Web Services (AWS), global Microsoft Azure, and {gcp-first} clusters support multiple CCO modes. 
==== .Prerequisites @@ -50,18 +50,18 @@ The following output values are possible, though not all are supported on all pl + [IMPORTANT] ==== -To determine the specific configuration of an AWS, GCP, or global Microsoft Azure cluster that has a `spec.credentialsMode` of `''`, `Mint`, or `Manual`, you must investigate further. +To determine the specific configuration of an AWS, {gcp-short}, or global Microsoft Azure cluster that has a `spec.credentialsMode` of `''`, `Mint`, or `Manual`, you must investigate further. -AWS and GCP clusters support using mint mode with the root secret deleted. +AWS and {gcp-short} clusters support using mint mode with the root secret deleted. ifdef::update[] If the cluster is specifically configured to use mint mode or uses mint mode by default, you must determine if the root secret is present on the cluster before updating. endif::update[] -An AWS, GCP, or global Microsoft Azure cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster with AWS STS, GCP Workload Identity, or {entra-first}. You can determine whether your cluster uses this strategy by examining the cluster `Authentication` object. +An AWS, {gcp-full}, or global Microsoft Azure cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster with AWS STS, {gcp-short} Workload Identity, or {entra-first}. You can determine whether your cluster uses this strategy by examining the cluster `Authentication` object. ==== ifdef::about-cco[] -. AWS or GCP clusters that use the default (`''`) only: To determine whether the cluster is operating in mint or passthrough mode, run the following command: +. 
AWS or {gcp-short} clusters that use the default (`''`) only: To determine whether the cluster is operating in mint or passthrough mode, run the following command: + [source,terminal] ---- @@ -71,7 +71,7 @@ $ oc get secret \ --template '{ .metadata.annotations }' ---- + -where `` is `aws-creds` for AWS or `gcp-credentials` for GCP. +where `` is `aws-creds` for AWS or `gcp-credentials` for {gcp-full}. + This command displays the value of the `.metadata.annotations` parameter in the cluster root secret object. The following output values are possible: + @@ -83,7 +83,7 @@ This command displays the value of the `.metadata.annotations` parameter in the If your cluster uses mint mode, you can also determine whether the cluster is operating without the root secret. endif::about-cco[] -. AWS or GCP clusters that use mint mode only: To determine whether the cluster is operating without the root secret, run the following command: +. AWS or {gcp-short} clusters that use mint mode only: To determine whether the cluster is operating without the root secret, run the following command: + [source,terminal] ---- @@ -91,11 +91,11 @@ $ oc get secret \ -n=kube-system ---- + -where `` is `aws-creds` for AWS or `gcp-credentials` for GCP. +where `` is `aws-creds` for AWS or `gcp-credentials` for {gcp-full}. + If the root secret is present, the output of this command returns information about the secret. An error indicates that the root secret is not present on the cluster. -. AWS, GCP, or global Microsoft Azure clusters that use manual mode only: To determine whether the cluster is configured to create and manage cloud credentials from outside of the cluster, run the following command: +. 
AWS, {gcp-short}, or global Microsoft Azure clusters that use manual mode only: To determine whether the cluster is configured to create and manage cloud credentials from outside of the cluster, run the following command: + [source,terminal] ---- diff --git a/modules/cco-determine-mode-gui.adoc b/modules/cco-determine-mode-gui.adoc index 4317a60ed51d..8418c6e9ec34 100644 --- a/modules/cco-determine-mode-gui.adoc +++ b/modules/cco-determine-mode-gui.adoc @@ -19,7 +19,7 @@ You can determine what mode the Cloud Credential Operator (CCO) is configured to [NOTE] ==== -Only Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platform (GCP) clusters support multiple CCO modes. +Only Amazon Web Services (AWS), global Microsoft Azure, and {gcp-first} clusters support multiple CCO modes. ==== .Prerequisites @@ -49,18 +49,18 @@ Only Amazon Web Services (AWS), global Microsoft Azure, and Google Cloud Platfor + [IMPORTANT] ==== -To determine the specific configuration of an AWS, GCP, or global Microsoft Azure cluster that has a `spec.credentialsMode` of `''`, `Mint`, or `Manual`, you must investigate further. +To determine the specific configuration of an AWS, {gcp-short}, or global Microsoft Azure cluster that has a `spec.credentialsMode` of `''`, `Mint`, or `Manual`, you must investigate further. -AWS and GCP clusters support using mint mode with the root secret deleted. +AWS and {gcp-short} clusters support using mint mode with the root secret deleted. ifdef::update[] If the cluster is specifically configured to use mint mode or uses mint mode by default, you must determine if the root secret is present on the cluster before updating. endif::update[] -An AWS, GCP, or global Microsoft Azure cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster with AWS STS, GCP Workload Identity, or {entra-first}. 
You can determine whether your cluster uses this strategy by examining the cluster `Authentication` object. +An AWS, {gcp-full}, or global Microsoft Azure cluster that uses manual mode might be configured to create and manage cloud credentials from outside of the cluster with AWS STS, {gcp-short} Workload Identity, or {entra-first}. You can determine whether your cluster uses this strategy by examining the cluster `Authentication` object. ==== ifdef::about-cco[] -. AWS or GCP clusters that use the default (`''`) only: To determine whether the cluster is operating in mint or passthrough mode, inspect the annotations on the cluster root secret: +. AWS or {gcp-short} clusters that use the default (`''`) only: To determine whether the cluster is operating in mint or passthrough mode, inspect the annotations on the cluster root secret: .. Navigate to *Workloads* -> *Secrets* and look for the root secret for your cloud provider. + @@ -77,7 +77,7 @@ Ensure that the *Project* dropdown is set to *All Projects*. |AWS |`aws-creds` -|GCP +|{gcp-full} |`gcp-credentials` |=== @@ -92,7 +92,7 @@ Ensure that the *Project* dropdown is set to *All Projects*. If your cluster uses mint mode, you can also determine whether the cluster is operating without the root secret. endif::about-cco[] -. AWS or GCP clusters that use mint mode only: To determine whether the cluster is operating without the root secret, navigate to *Workloads* -> *Secrets* and look for the root secret for your cloud provider. +. AWS or {gcp-short} clusters that use mint mode only: To determine whether the cluster is operating without the root secret, navigate to *Workloads* -> *Secrets* and look for the root secret for your cloud provider. + [NOTE] ==== @@ -107,7 +107,7 @@ Ensure that the *Project* dropdown is set to *All Projects*. |AWS |`aws-creds` -|GCP +|{gcp-full} |`gcp-credentials` |=== @@ -117,7 +117,7 @@ Ensure that the *Project* dropdown is set to *All Projects*. 
* If you do not see these values, your cluster is using the CCO in mint mode with the root secret removed. -- -. AWS, GCP, or global Microsoft Azure clusters that use manual mode only: To determine whether the cluster is configured to create and manage cloud credentials from outside of the cluster, you must check the cluster `Authentication` object YAML values. +. AWS, {gcp-short}, or global Microsoft Azure clusters that use manual mode only: To determine whether the cluster is configured to create and manage cloud credentials from outside of the cluster, you must check the cluster `Authentication` object YAML values. .. Navigate to *Administration* -> *Cluster Settings*. diff --git a/modules/cco-short-term-creds-auth-flow-gcp.adoc b/modules/cco-short-term-creds-auth-flow-gcp.adoc index 2204d243cbcf..2ded357680fe 100644 --- a/modules/cco-short-term-creds-auth-flow-gcp.adoc +++ b/modules/cco-short-term-creds-auth-flow-gcp.adoc @@ -4,11 +4,11 @@ :_mod-docs-content-type: REFERENCE [id="cco-short-term-creds-auth-flow-gcp_{context}"] -= GCP Workload Identity authentication process += {gcp-short} Workload Identity authentication process -Requests for new and refreshed credentials are automated by using an appropriately configured OpenID Connect (OIDC) identity provider combined with IAM service accounts. Service account tokens that are trusted by GCP are signed by {product-title} and can be projected into a pod and used for authentication. Tokens are refreshed after one hour. +Requests for new and refreshed credentials are automated by using an appropriately configured OpenID Connect (OIDC) identity provider combined with IAM service accounts. Service account tokens that are trusted by {gcp-short} are signed by {product-title} and can be projected into a pod and used for authentication. Tokens are refreshed after one hour. -The following diagram details the authentication flow between GCP and the {product-title} cluster when using GCP Workload Identity. 
+The following diagram details the authentication flow between {gcp-short} and the {product-title} cluster when using {gcp-short} Workload Identity. -.GCP Workload Identity authentication flow -image::347_OpenShift_credentials_with_STS_updates_0623_GCP.png[Detailed authentication flow between GCP and the cluster when using GCP Workload Identity] \ No newline at end of file +.{gcp-full} Workload Identity authentication flow +image::347_OpenShift_credentials_with_STS_updates_0623_GCP.png[Detailed authentication flow between {gcp-full} and the cluster when using {gcp-full} Workload Identity] \ No newline at end of file diff --git a/modules/cco-short-term-creds-component-permissions-gcp.adoc b/modules/cco-short-term-creds-component-permissions-gcp.adoc index 1248c24440dd..0368fc027043 100644 --- a/modules/cco-short-term-creds-component-permissions-gcp.adoc +++ b/modules/cco-short-term-creds-component-permissions-gcp.adoc @@ -4,6 +4,6 @@ :_mod-docs-content-type: REFERENCE [id="cco-short-term-creds-component-permissions-gcp_{context}"] -= GCP component secret permissions requirements += {gcp-short} component secret permissions requirements //This topic is a placeholder for when GCP role granularity can bbe documented \ No newline at end of file diff --git a/modules/cco-short-term-creds-format-gcp.adoc b/modules/cco-short-term-creds-format-gcp.adoc index e24da6ef8ed6..09dfb629cbff 100644 --- a/modules/cco-short-term-creds-format-gcp.adoc +++ b/modules/cco-short-term-creds-format-gcp.adoc @@ -4,11 +4,11 @@ :_mod-docs-content-type: REFERENCE [id="cco-short-term-creds-format-gcp_{context}"] -= GCP component secret formats += {gcp-short} component secret formats -Using manual mode with GCP Workload Identity changes the content of the GCP credentials that are provided to individual {product-title} components. 
Compare the following secret content: +Using manual mode with {gcp-short} Workload Identity changes the content of the {gcp-short} credentials that are provided to individual {product-title} components. Compare the following secret content: -.GCP secret format +.{gcp-full} secret format [source,yaml] ---- @@ -42,9 +42,9 @@ data: } ---- <1> The credential type is `service_account`. -<2> The private RSA key that is used to authenticate to GCP. This key must be kept secure and is not rotated. +<2> The private RSA key that is used to authenticate to {gcp-short}. This key must be kept secure and is not rotated. -.Content of the Base64 encoded `service_account.json` file using GCP Workload Identity +.Content of the Base64 encoded `service_account.json` file using {gcp-short} Workload Identity [source,json] ---- @@ -63,6 +63,6 @@ data: } ---- <1> The credential type is `external_account`. -<2> The target audience is the GCP Workload Identity provider. +<2> The target audience is the {gcp-short} Workload Identity provider. <3> The resource URL of the service account that can be impersonated with these credentials. <4> The path to the service account token inside the pod. By convention, this is `/var/run/secrets/openshift/serviceaccount/token` for {product-title} components. \ No newline at end of file diff --git a/modules/ccs-gcp-customer-procedure-serviceaccount.adoc b/modules/ccs-gcp-customer-procedure-serviceaccount.adoc index 7b62295ea60f..4a317524ce2f 100644 --- a/modules/ccs-gcp-customer-procedure-serviceaccount.adoc +++ b/modules/ccs-gcp-customer-procedure-serviceaccount.adoc @@ -11,7 +11,7 @@ Besides the required customer procedures listed in _Required customer procedure_ .Procedure -. To ensure that Red Hat can perform necessary actions, you must create an `osd-ccs-admin` IAM link:https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account[service account] user within the GCP project. +. 
To ensure that Red Hat can perform necessary actions, you must create an `osd-ccs-admin` IAM link:https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account[service account] user within the {gcp-short} project. + diff --git a/modules/ccs-gcp-customer-procedure-wif.adoc b/modules/ccs-gcp-customer-procedure-wif.adoc index 592171d06b2e..8b90085cc0e8 100644 --- a/modules/ccs-gcp-customer-procedure-wif.adoc +++ b/modules/ccs-gcp-customer-procedure-wif.adoc @@ -26,7 +26,7 @@ The following roles are only required when creating, updating, or deleting WIF c |Role Administrator |`roles/iam.roleAdmin` -|Required by the GCP client in the OCM CLI for creating custom roles. +|Required by the {gcp-short} client in the OCM CLI for creating custom roles. |Service Account Admin |`roles/iam.serviceAccountAdmin` diff --git a/modules/ccs-gcp-customer-procedure.adoc b/modules/ccs-gcp-customer-procedure.adoc index 868317064bb8..b41653312138 100644 --- a/modules/ccs-gcp-customer-procedure.adoc +++ b/modules/ccs-gcp-customer-procedure.adoc @@ -6,11 +6,11 @@ = Required customer procedure -The Customer Cloud Subscription (CCS) model allows Red{nbsp}Hat to deploy and manage {product-title} into a customer's Google Cloud Platform (GCP) project. Red{nbsp}Hat requires several prerequisites to be completed before providing these services. +The Customer Cloud Subscription (CCS) model allows Red{nbsp}Hat to deploy and manage {product-title} into a customer's {gcp-first} project. Red{nbsp}Hat requires several prerequisites to be completed before providing these services. [NOTE] ==== The following requirements in this topic apply to {product-title} on {GCP} clusters created using both the Workload Identity Federation (WIF) and service account authentication types. -Red{nbsp}Hat recommends using WIF as the authentication type for installing and interacting with an {product-title} cluster deployed on GCP because WIF provides enhanced security. 
+Red{nbsp}Hat recommends using WIF as the authentication type for installing and interacting with an {product-title} cluster deployed on {gcp-short} because WIF provides enhanced security. For information about creating a cluster using the WIF authentication type, see _Additional resources_. @@ -20,26 +20,26 @@ For additional requirements that apply to the service account authentication typ .Prerequisites -Before using {product-title} in your GCP project, confirm that the following organizational policy constraints are configured correctly where applicable: +Before using {product-title} in your {gcp-short} project, confirm that the following organizational policy constraints are configured correctly where applicable: * `constraints/iam.allowedPolicyMemberDomains` ** This policy constraint is supported only if Red{nbsp}Hat's Directory Customer ID's `C02k0l5e8` and `C04j7mbwl` are included in the allowlist. * `constraints/compute.restrictLoadBalancerCreationForTypes` -** This policy constraint is supported only when creating a private cluster with GCP Private Service Connect (PSC). You must ensure that the `INTERNAL_TCP_UDP` load balancer type is included in the allowlist or excluded from the deny list. +** This policy constraint is supported only when creating a private cluster with {gcp-short} Private Service Connect (PSC). You must ensure that the `INTERNAL_TCP_UDP` load balancer type is included in the allowlist or excluded from the deny list. + [IMPORTANT] ==== -Although the `EXTERNAL_NETWORK_TCP_UDP` load balancer type is not required when creating a private cluster with GCP Private Service Connect (PSC), disallowing it via this constraint will prevent the cluster from being able to create externally accessible load balancers. 
+Although the `EXTERNAL_NETWORK_TCP_UDP` load balancer type is not required when creating a private cluster with {gcp-short} Private Service Connect (PSC), disallowing it via this constraint will prevent the cluster from being able to create externally accessible load balancers. ==== * `constraints/compute.requireShieldedVm` ** This policy constraint is supported only if the cluster is created with *Enable Secure Boot support for Shielded VMs* selected during the initial cluster creation. * `constraints/compute.vmExternalIpAccess` -** This policy constraint is supported only when creating a private cluster with GCP Private Service Connect (PSC). For all other cluster types, this policy constraint is supported only after cluster creation. +** This policy constraint is supported only when creating a private cluster with {gcp-short} Private Service Connect (PSC). For all other cluster types, this policy constraint is supported only after cluster creation. * `constraints/compute.trustedImageProjects` ** This policy constraint is supported only when the projects `redhat-marketplace-public`, `rhel-cloud`, and `rhcos-cloud` are included in the allowlist. If this policy constraint is enabled and these projects are not included in the allowlist, cluster creation will fail. -For more information about configuring GCP organization policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. +For more information about configuring {gcp-short} organization policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. 
.Procedure @@ -88,7 +88,7 @@ For more information about configuring GCP organization policy constraints, see |link:https://cloud.google.com/service-infrastructure/docs/service-management/reference/rest[Service Management API] |`servicemanagement.googleapis.com` -|Used indirectly to fetch quota information for GCP resources. +|Used indirectly to fetch quota information for {gcp-short} resources. |link:https://cloud.google.com/service-usage/docs/reference/rest[Service Usage API] |`serviceusage.googleapis.com` diff --git a/modules/ccs-gcp-customer-requirements.adoc b/modules/ccs-gcp-customer-requirements.adoc index a40ef546608e..fc4edc333707 100644 --- a/modules/ccs-gcp-customer-requirements.adoc +++ b/modules/ccs-gcp-customer-requirements.adoc @@ -6,22 +6,22 @@ = Customer requirements -{product-title} clusters using a Customer Cloud Subscription (CCS) model on Google Cloud Platform (GCP) must meet several prerequisites before they can be deployed. +{product-title} clusters using a Customer Cloud Subscription (CCS) model on {gcp-first} must meet several prerequisites before they can be deployed. [id="ccs-gcp-requirements-account_{context}"] == Account -* The customer ensures that link:https://cloud.google.com/storage/quotas[Google Cloud limits] and link:https://cloud.google.com/compute/resource-usage[allocation quotas that apply to Compute Engine] are sufficient to support {product-title} provisioned within the customer-provided GCP account. +* The customer ensures that link:https://cloud.google.com/storage/quotas[Google Cloud limits] and link:https://cloud.google.com/compute/resource-usage[allocation quotas that apply to Compute Engine] are sufficient to support {product-title} provisioned within the customer-provided {gcp-short} account. -* The customer-provided GCP account should be in the customer's Google Cloud Organization. +* The customer-provided {gcp-short} account should be in the customer's Google Cloud Organization. 
-* The customer-provided GCP account must not be transferable to Red{nbsp}Hat. +* The customer-provided {gcp-short} account must not be transferable to Red{nbsp}Hat. -* The customer may not impose GCP usage restrictions on Red{nbsp}Hat activities. Imposing restrictions severely hinders Red{nbsp}Hat's ability to respond to incidents. +* The customer may not impose {gcp-short} usage restrictions on Red{nbsp}Hat activities. Imposing restrictions severely hinders Red{nbsp}Hat's ability to respond to incidents. -* Red{nbsp}Hat deploys monitoring into GCP to alert Red{nbsp}Hat when a highly privileged account, such as a root account, logs into the customer-provided GCP account. +* Red{nbsp}Hat deploys monitoring into {gcp-short} to alert Red{nbsp}Hat when a highly privileged account, such as a root account, logs into the customer-provided {gcp-short} account. -* The customer can deploy native GCP services within the same customer-provided GCP account. +* The customer can deploy native {gcp-short} services within the same customer-provided {gcp-short} account. + [NOTE] ==== @@ -35,32 +35,32 @@ Customers are encouraged, but not mandated, to deploy resources in a Virtual Pri + [NOTE] ==== -This policy only provides Red{nbsp}Hat with permissions and capabilities to change resources in the customer-provided GCP account. +This policy only provides Red{nbsp}Hat with permissions and capabilities to change resources in the customer-provided {gcp-short} account. ==== -* Red{nbsp}Hat must have GCP console access to the customer-provided GCP account. This access is protected and managed by Red{nbsp}Hat. +* Red{nbsp}Hat must have {gcp-short} console access to the customer-provided {gcp-short} account. This access is protected and managed by Red{nbsp}Hat. -* The customer must not utilize the GCP account to elevate their permissions within the {product-title} cluster. 
+* The customer must not utilize the {gcp-short} account to elevate their permissions within the {product-title} cluster. -* Actions available in the {cluster-manager-url} must not be directly performed in the customer-provided GCP account. +* Actions available in the {cluster-manager-url} must not be directly performed in the customer-provided {gcp-short} account. [id="ccs-gcp-requirements-support_{context}"] == Support requirements -* Red{nbsp}Hat recommends that the customer have at least link:https://cloud.google.com/support[Enhanced Support] from GCP. +* Red{nbsp}Hat recommends that the customer have at least link:https://cloud.google.com/support[Enhanced Support] from {gcp-short}. -* Red{nbsp}Hat has authority from the customer to request GCP support on their behalf. +* Red{nbsp}Hat has authority from the customer to request {gcp-short} support on their behalf. -* Red{nbsp}Hat has authority from the customer to request GCP resource limit increases on the customer-provided account. +* Red{nbsp}Hat has authority from the customer to request {gcp-short} resource limit increases on the customer-provided account. * Red{nbsp}Hat manages the restrictions, limitations, expectations, and defaults for all {product-title} clusters in the same manner, unless otherwise specified in this requirements section. [id="ccs-gcp-requirements-security_{context}"] == Security requirements -* The customer-provided IAM credentials must be unique to the customer-provided GCP account and must not be stored anywhere in the customer-provided GCP account. +* The customer-provided IAM credentials must be unique to the customer-provided {gcp-short} account and must not be stored anywhere in the customer-provided {gcp-short} account. -* Volume snapshots will remain within the customer-provided GCP account and customer-specified region. +* Volume snapshots will remain within the customer-provided {gcp-short} account and customer-specified region. 
* To manage, monitor, and troubleshoot {product-title} clusters, Red{nbsp}Hat must have direct access to the cluster's API server. You must not restrict or otherwise prevent Red{nbsp}Hat's access to the {product-title} cluster's API server. + diff --git a/modules/ccs-gcp-iam.adoc b/modules/ccs-gcp-iam.adoc index 6dc1f99c157d..7ca9ff13f010 100644 --- a/modules/ccs-gcp-iam.adoc +++ b/modules/ccs-gcp-iam.adoc @@ -6,7 +6,7 @@ = Red Hat managed Google Cloud resources -Red Hat is responsible for creating and managing the following IAM Google Cloud Platform (GCP) resources. +Red Hat is responsible for creating and managing the following IAM {gcp-first} resources. [IMPORTANT] ===== @@ -16,7 +16,7 @@ The _IAM service account and roles_ and _IAM group and roles_ topics are only ap [id="ccs-gcp-iam-service-account-roles_{context}"] == IAM service account and roles -The `osd-managed-admin` IAM service account is created immediately after taking control of the customer-provided GCP account. This is the user that will perform the {product-title} cluster installation. +The `osd-managed-admin` IAM service account is created immediately after taking control of the customer-provided {gcp-short} account. This is the user that will perform the {product-title} cluster installation. The following roles are attached to the service account: @@ -66,7 +66,7 @@ When applied to an individual *bucket*, control applies only to the specified bu [id="ccs-gcp-iam-group-roles_{context}"] == IAM group and roles -The `sd-sre-platform-gcp-access` Google group is granted access to the GCP project to allow Red Hat Site Reliability Engineering (SRE) access to the console for emergency troubleshooting purposes. +The `sd-sre-platform-gcp-access` Google group is granted access to the {gcp-short} project to allow Red Hat Site Reliability Engineering (SRE) access to the console for emergency troubleshooting purposes. 
[NOTE] ==== diff --git a/modules/ccs-gcp-provisioned.adoc b/modules/ccs-gcp-provisioned.adoc index 10701dd56e96..b2114dd181f7 100644 --- a/modules/ccs-gcp-provisioned.adoc +++ b/modules/ccs-gcp-provisioned.adoc @@ -3,14 +3,14 @@ // * osd_planning/gcp-ccs.adoc [id="ccs-gcp-provisioned_{context}"] -= Provisioned GCP Infrastructure += Provisioned {gcp-short} Infrastructure -This is an overview of the provisioned Google Cloud Platform (GCP) components on a deployed {product-title} cluster. For a more detailed listing of all provisioned GCP components, see the link:https://access.redhat.com/documentation/en-us/openshift_container_platform/[{OCP} documentation]. +This is an overview of the provisioned {gcp-first} components on a deployed {product-title} cluster. For a more detailed listing of all provisioned {gcp-short} components, see the link:https://access.redhat.com/documentation/en-us/openshift_container_platform/[{OCP} documentation]. [id="gcp-policy-instances_{context}"] == Compute instances -GCP compute instances are required to deploy the control plane and data plane functions of {product-title} in GCP. Instance types might vary for control plane and infrastructure nodes depending on worker node count. +{gcp-full} compute instances are required to deploy the control plane and data plane functions of {product-title} in {gcp-short}. Instance types might vary for control plane and infrastructure nodes depending on worker node count. 
* Single availability zone ** 2 infra nodes (n2-highmem-4 machine type: 4 vCPU and 32 GB RAM) diff --git a/modules/ccs-gcp-understand.adoc b/modules/ccs-gcp-understand.adoc index 9f8ca5c08869..41a28c5f1146 100644 --- a/modules/ccs-gcp-understand.adoc +++ b/modules/ccs-gcp-understand.adoc @@ -4,13 +4,13 @@ :_mod-docs-content-type: CONCEPT [id="ccs-gcp-understand_{context}"] -= Understanding Customer Cloud Subscriptions on GCP += Understanding Customer Cloud Subscriptions on {gcp-full} Red{nbsp}Hat {product-title} provides a Customer Cloud Subscription (CCS) model that allows Red{nbsp}Hat to deploy and manage {product-title} into a customer's existing {GCP} account. Red{nbsp}Hat requires several prerequisites be met in order to provide this service. -Red{nbsp}Hat recommends the usage of a GCP project, managed by the customer, to organize all of your GCP resources. A project consists of a set of users and APIs, as well as billing, authentication, and monitoring settings for those APIs. +Red{nbsp}Hat recommends the usage of a {gcp-short} project, managed by the customer, to organize all of your {gcp-short} resources. A project consists of a set of users and APIs, as well as billing, authentication, and monitoring settings for those APIs. -It is recommended for the {product-title} cluster using a CCS model to be hosted in a GCP project within a GCP organization. The organization resource is the root node of the GCP resource hierarchy and all resources that belong to an organization are grouped under the organization node. Customers have the choice of using service account keys or Workload Identity Federation when creating the roles and credentials necessary to access Google Cloud resources within a GCP project. +It is recommended for the {product-title} cluster using a CCS model to be hosted in a {gcp-short} project within a {gcp-short} organization. 
The organization resource is the root node of the {gcp-short} resource hierarchy and all resources that belong to an organization are grouped under the organization node. Customers have the choice of using service account keys or Workload Identity Federation when creating the roles and credentials necessary to access Google Cloud resources within a {gcp-short} project. -For more information about creating and managing organization resources within GCP, see link:https://cloud.google.com/resource-manager/docs/creating-managing-organization[Creating and managing organization resources]. \ No newline at end of file +For more information about creating and managing organization resources within {gcp-full}, see link:https://cloud.google.com/resource-manager/docs/creating-managing-organization[Creating and managing organization resources]. \ No newline at end of file diff --git a/modules/cert-manager-acme-dns01-ambient-gcp.adoc b/modules/cert-manager-acme-dns01-ambient-gcp.adoc index 1349065166b2..e83e3d2e6a6e 100644 --- a/modules/cert-manager-acme-dns01-ambient-gcp.adoc +++ b/modules/cert-manager-acme-dns01-ambient-gcp.adoc @@ -4,14 +4,14 @@ :_mod-docs-content-type: PROCEDURE [id="cert-manager-acme-dns01-ambient-gcp_{context}"] -= Configuring an ACME issuer by using ambient credentials on GCP += Configuring an ACME issuer by using ambient credentials on {gcp-full} -You can use the {cert-manager-operator} to set up an ACME issuer to solve DNS-01 challenges by using ambient credentials on GCP. This procedure uses _Let's Encrypt_ as the ACME CA server and shows how to solve DNS-01 challenges with Google CloudDNS. +You can use the {cert-manager-operator} to set up an ACME issuer to solve DNS-01 challenges by using ambient credentials on {gcp-short}. This procedure uses _Let's Encrypt_ as the ACME CA server and shows how to solve DNS-01 challenges with Google CloudDNS. 
.Prerequisites -* If your cluster is configured to use GCP Workload Identity, you followed the instructions from the _Configuring cloud credentials for the cert-manager Operator for Red Hat OpenShift with GCP Workload Identity_ section. -* If your cluster does not use GCP Workload Identity, you followed the instructions from the _Configuring cloud credentials for the cert-manager Operator for Red Hat OpenShift on GCP_ section. +* If your cluster is configured to use {gcp-short} Workload Identity, you followed the instructions from the _Configuring cloud credentials for the cert-manager Operator for Red Hat OpenShift with {gcp-short} Workload Identity_ section. +* If your cluster does not use {gcp-full} Workload Identity, you followed the instructions from the _Configuring cloud credentials for the cert-manager Operator for Red Hat OpenShift on {gcp-full}_ section. .Procedure @@ -90,7 +90,7 @@ spec: <1> Provide a name for the issuer. <2> Replace `` with the name of the secret to store the ACME account private key in. <3> Specify the URL to access the ACME server's `directory` endpoint. This example uses the _Let's Encrypt_ staging environment. -<4> Replace `` with the name of the GCP project that contains the Cloud DNS zone. +<4> Replace `` with the name of the {gcp-short} project that contains the Cloud DNS zone. .. 
Create the `Issuer` object by running the following command: + diff --git a/modules/cert-manager-acme-dns01-explicit-gcp.adoc b/modules/cert-manager-acme-dns01-explicit-gcp.adoc index 433a3acdf885..4b9843d0b39f 100644 --- a/modules/cert-manager-acme-dns01-explicit-gcp.adoc +++ b/modules/cert-manager-acme-dns01-explicit-gcp.adoc @@ -4,9 +4,9 @@ :_mod-docs-content-type: PROCEDURE [id="cert-manager-acme-dns01-explicit-gcp_{context}"] -= Configuring an ACME issuer by using explicit credentials for GCP Cloud DNS += Configuring an ACME issuer by using explicit credentials for {gcp-short} Cloud DNS -You can use the {cert-manager-operator} to set up an ACME issuer to solve DNS-01 challenges by using explicit credentials on GCP. This procedure uses _Let's Encrypt_ as the ACME CA server and shows how to solve DNS-01 challenges with Google CloudDNS. +You can use the {cert-manager-operator} to set up an ACME issuer to solve DNS-01 challenges by using explicit credentials on {gcp-short}. This procedure uses _Let's Encrypt_ as the ACME CA server and shows how to solve DNS-01 challenges with Google CloudDNS. .Prerequisites @@ -14,7 +14,7 @@ You can use the {cert-manager-operator} to set up an ACME issuer to solve DNS-01 + [NOTE] ==== -You can use Google CloudDNS with explicit credentials in an {product-title} cluster that is not running on GCP. +You can use Google CloudDNS with explicit credentials in an {product-title} cluster that is not running on {gcp-short}. ==== .Procedure @@ -59,7 +59,7 @@ spec: $ oc new-project my-issuer-namespace ---- -. Create a secret to store your GCP credentials by running the following command: +. Create a secret to store your {gcp-short} credentials by running the following command: + [source,terminal] ---- @@ -96,9 +96,9 @@ spec: <2> Replace `` with your issuer namespace. <3> Replace `` with the name of the secret to store the ACME account private key in. <4> Specify the URL to access the ACME server's `directory` endpoint. 
This example uses the _Let's Encrypt_ staging environment. -<5> Replace `` with the name of the GCP project that contains the Cloud DNS zone. +<5> Replace `` with the name of the {gcp-short} project that contains the Cloud DNS zone. <6> Specify the name of the secret you created. -<7> Specify the key in the secret you created that stores your GCP secret access key. +<7> Specify the key in the secret you created that stores your {gcp-short} secret access key. .. Create the `Issuer` object by running the following command: + diff --git a/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc b/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc index ce8c7fb3e39c..640de1596ca1 100644 --- a/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc +++ b/modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="cert-manager-configure-cloud-credentials-gcp-non-sts_{context}"] -= Authenticating on GCP += Authenticating on {gcp-full} .Prerequisites @@ -74,7 +74,7 @@ NAME READY STATUS RESTARTS AGE cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 15m39s ---- -. Verify that the cert-manager controller pod is updated with GCP credential volumes that are mounted under the path specified in `mountPath` by running the following command: +. 
Verify that the cert-manager controller pod is updated with {gcp-short} credential volumes that are mounted under the path specified in `mountPath` by running the following command: + [source,terminal] ---- diff --git a/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc b/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc index 167526259817..6b0659944d8b 100644 --- a/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc +++ b/modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc @@ -4,13 +4,13 @@ :_mod-docs-content-type: PROCEDURE [id="cert-manager-configure-cloud-credentials-gcp-sts_{context}"] -= Authenticating with GCP Workload Identity += Authenticating with {gcp-short} Workload Identity .Prerequisites * You extracted and prepared the `ccoctl` binary. * You have installed version 1.11.1 or later of the {cert-manager-operator}. -* You have configured an {product-title} cluster with GCP Workload Identity by using the Cloud Credential Operator in a manual mode. +* You have configured an {product-title} cluster with {gcp-short} Workload Identity by using the Cloud Credential Operator in a manual mode. .Procedure @@ -105,7 +105,7 @@ NAME READY STATUS RESTARTS AGE cert-manager-bd7fbb9fc-wvbbt 1/1 Running 0 15m39s ---- -. Verify that the cert-manager controller pod is updated with GCP workload identity credential volumes that are mounted under the path specified in `mountPath` by running the following command: +. 
Verify that the cert-manager controller pod is updated with {gcp-short} workload identity credential volumes that are mounted under the path specified in `mountPath` by running the following command: + [source,terminal] ---- diff --git a/modules/cluster-logging-collector-log-forward-gcp.adoc b/modules/cluster-logging-collector-log-forward-gcp.adoc index e97cb76d993f..6c305c90bf3b 100644 --- a/modules/cluster-logging-collector-log-forward-gcp.adoc +++ b/modules/cluster-logging-collector-log-forward-gcp.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="cluster-logging-collector-log-forward-gcp_{context}"] -= Forwarding logs to Google Cloud Platform (GCP) += Forwarding logs to {gcp-first} You can forward logs to link:https://cloud.google.com/logging/docs/basic-concepts[Google Cloud Logging] in addition to, or instead of, the internal default {product-title} log store. @@ -54,7 +54,7 @@ spec: <1> In legacy implementations, the CR name must be `instance`. In multi log forwarder implementations, you can use any name. <2> In legacy implementations, the CR namespace must be `openshift-logging`. In multi log forwarder implementations, you can use any namespace. <3> The name of your service account. The service account is only required in multi log forwarder implementations if the log forwarder is not deployed in the `openshift-logging` namespace. -<4> Set a `projectId`, `folderId`, `organizationId`, or `billingAccountId` field and its corresponding value, depending on where you want to store your logs in the link:https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy[GCP resource hierarchy]. +<4> Set a `projectId`, `folderId`, `organizationId`, or `billingAccountId` field and its corresponding value, depending on where you want to store your logs in the link:https://cloud.google.com/resource-manager/docs/cloud-platform-resource-hierarchy[{gcp-full} resource hierarchy]. 
<5> Set the value to add to the `logName` field of the link:https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry[Log Entry]. <6> Specify which log types to forward by using the pipeline: `application`, `infrastructure`, or `audit`. diff --git a/modules/compute-machineset-upi-reqs.adoc b/modules/compute-machineset-upi-reqs.adoc index 6116509f5d2c..62a6168d1117 100644 --- a/modules/compute-machineset-upi-reqs.adoc +++ b/modules/compute-machineset-upi-reqs.adoc @@ -2,7 +2,7 @@ // // * machine_management/creating_machinesets/creating-machineset-vsphere.adoc // -// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and GCP. +// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and {gcp-short}. ifeval::["{context}" == "creating-machineset-vsphere"] :vsphere: diff --git a/modules/configuring-a-proxy-during-installation-ocm.adoc b/modules/configuring-a-proxy-during-installation-ocm.adoc index ee229ae77da8..37913d8ccd6a 100644 --- a/modules/configuring-a-proxy-during-installation-ocm.adoc +++ b/modules/configuring-a-proxy-during-installation-ocm.adoc @@ -21,7 +21,7 @@ endif::openshift-dedicated[] Prior to the installation, you must verify that the proxy is accessible from the VPC that the cluster is being installed into. The proxy must also be accessible from the private subnets of the VPC. ifdef::openshift-dedicated[] -For detailed steps to configure a cluster-wide proxy during installation by using {cluster-manager}, see _Creating a cluster on AWS_ or _Creating a cluster on GCP_. +For detailed steps to configure a cluster-wide proxy during installation by using {cluster-manager}, see _Creating a cluster on AWS_ or _Creating a cluster on {gcp-full}_. 
endif::openshift-dedicated[] ifdef::openshift-rosa,openshift-rosa-hcp[] diff --git a/modules/configuring-firewall.adoc b/modules/configuring-firewall.adoc index 7a84f2943835..a753b0a8db32 100644 --- a/modules/configuring-firewall.adoc +++ b/modules/configuring-firewall.adoc @@ -243,14 +243,14 @@ Alternatively, if you choose to not use a wildcard for AWS APIs, you must includ |443 |Used to provide access to CloudFront. If you use the AWS Security Token Service (STS) and the private S3 bucket, you must provide access to CloudFront. -.2+|GCP +.2+|{gcp-full} |`*.googleapis.com` |443 -|Required to access GCP services and resources. Review link:https://cloud.google.com/endpoints/[Cloud Endpoints] in the GCP documentation to find the endpoints to allow for your APIs. +|Required to access {gcp-short} services and resources. Review link:https://cloud.google.com/endpoints/[Cloud Endpoints] in the {gcp-short} documentation to find the endpoints to allow for your APIs. |`accounts.google.com` |443 -| Required to access your GCP account. +| Required to access your {gcp-short} account. .3+|Microsoft Azure |`management.azure.com` diff --git a/modules/control-plane-machine-set-operator.adoc b/modules/control-plane-machine-set-operator.adoc index 08eceaf5dd6a..5aa53b71487d 100644 --- a/modules/control-plane-machine-set-operator.adoc +++ b/modules/control-plane-machine-set-operator.adoc @@ -9,7 +9,7 @@ The Control Plane Machine Set Operator automates the management of control plane [NOTE] ==== -This Operator is available for Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, Nutanix, and VMware vSphere. +This Operator is available for Amazon Web Services (AWS), {gcp-first}, Microsoft Azure, Nutanix, and VMware vSphere. 
==== diff --git a/modules/cpmso-failure-domains-provider.adoc b/modules/cpmso-failure-domains-provider.adoc index b43fe613bde1..15c796250c91 100644 --- a/modules/cpmso-failure-domains-provider.adoc +++ b/modules/cpmso-failure-domains-provider.adoc @@ -19,7 +19,7 @@ The control plane machine set concept of a failure domain is analogous to existi |X |link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones[Availability Zone (AZ)] -|Google Cloud Platform (GCP) +|{gcp-first} |X |link:https://cloud.google.com/compute/docs/regions-zones[zone] diff --git a/modules/cpmso-limitations.adoc b/modules/cpmso-limitations.adoc index 6c32a239e9c7..e7cc2af26ab8 100644 --- a/modules/cpmso-limitations.adoc +++ b/modules/cpmso-limitations.adoc @@ -8,7 +8,7 @@ The Control Plane Machine Set Operator has the following limitations: -* Only Amazon Web Services (AWS), Google Cloud Platform (GCP), {ibm-power-server-name}, Microsoft Azure, Nutanix, VMware vSphere, and {rh-openstack-first} clusters are supported. +* Only Amazon Web Services (AWS), {gcp-first}, {ibm-power-server-name}, Microsoft Azure, Nutanix, VMware vSphere, and {rh-openstack-first} clusters are supported. * Clusters that do not have preexisting machines that represent the control plane nodes cannot use a control plane machine set or enable the use of a control plane machine set after installation. Generally, preexisting control plane machines are only present if a cluster was installed using infrastructure provisioned by the installation program. + @@ -44,11 +44,11 @@ No resources found in openshift-machine-api namespace. * Deploying Azure control plane machines on Ephemeral OS disks increases risk for data loss and is not supported. -* Deploying control plane machines as AWS Spot Instances, GCP preemptible VMs, or Azure Spot VMs is not supported. 
+* Deploying control plane machines as AWS Spot Instances, {gcp-short} preemptible VMs, or Azure Spot VMs is not supported. + [IMPORTANT] ==== -Attempting to deploy control plane machines as AWS Spot Instances, GCP preemptible VMs, or Azure Spot VMs might cause the cluster to lose etcd quorum. A cluster that loses all control plane machines simultaneously is unrecoverable. +Attempting to deploy control plane machines as AWS Spot Instances, {gcp-short} preemptible VMs, or Azure Spot VMs might cause the cluster to lose etcd quorum. A cluster that loses all control plane machines simultaneously is unrecoverable. ==== * Making changes to the control plane machine set during or prior to installation is not supported. You must make any changes to the control plane machine set only after installation. diff --git a/modules/cpmso-yaml-failure-domain-gcp.adoc b/modules/cpmso-yaml-failure-domain-gcp.adoc index dc6cb3bc8bdd..e0abe4f3253c 100644 --- a/modules/cpmso-yaml-failure-domain-gcp.adoc +++ b/modules/cpmso-yaml-failure-domain-gcp.adoc @@ -4,13 +4,13 @@ :_mod-docs-content-type: REFERENCE [id="cpmso-yaml-failure-domain-gcp_{context}"] -= Sample GCP failure domain configuration += Sample {gcp-short} failure domain configuration -The control plane machine set concept of a failure domain is analogous to the existing GCP concept of a link:https://cloud.google.com/compute/docs/regions-zones[_zone_]. The `ControlPlaneMachineSet` CR spreads control plane machines across multiple failure domains when possible. +The control plane machine set concept of a failure domain is analogous to the existing {gcp-short} concept of a link:https://cloud.google.com/compute/docs/regions-zones[_zone_]. The `ControlPlaneMachineSet` CR spreads control plane machines across multiple failure domains when possible. -When configuring GCP failure domains in the control plane machine set, you must specify the zone name to use. 
+When configuring {gcp-short} failure domains in the control plane machine set, you must specify the zone name to use. -.Sample GCP failure domain values +.Sample {gcp-short} failure domain values [source,yaml] ---- apiVersion: machine.openshift.io/v1 @@ -32,6 +32,6 @@ spec: platform: GCP <3> # ... ---- -<1> Specifies a GCP zone for the first failure domain. +<1> Specifies a {gcp-short} zone for the first failure domain. <2> Specifies an additional failure domain. Further failure domains are added the same way. <3> Specifies the cloud provider platform name. Do not change this value. \ No newline at end of file diff --git a/modules/cpmso-yaml-provider-spec-gcp.adoc b/modules/cpmso-yaml-provider-spec-gcp.adoc index c46fa0fa0b7b..8c94929103bd 100644 --- a/modules/cpmso-yaml-provider-spec-gcp.adoc +++ b/modules/cpmso-yaml-provider-spec-gcp.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: REFERENCE [id="cpmso-yaml-provider-spec-gcp_{context}"] -= Sample GCP provider specification += Sample {gcp-short} provider specification When you create a control plane machine set for an existing cluster, the provider specification must match the `providerSpec` configuration in the control plane machine custom resource (CR) that the installation program creates. You can omit any field that is set in the failure domain section of the CR. @@ -30,7 +30,7 @@ $ oc -n openshift-machine-api \ get ControlPlaneMachineSet/cluster ---- -.Sample GCP `providerSpec` values +.Sample {gcp-short} `providerSpec` values [source,yaml] ---- apiVersion: machine.openshift.io/v1 @@ -83,7 +83,7 @@ spec: <1> Specifies the secret name for the cluster. Do not change this value. <2> Specifies the path to the image that was used to create the disk. 
+ -To use a GCP Marketplace image, specify the offer to use: +To use a {gcp-short} Marketplace image, specify the offer to use: + -- * {product-title}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-413-x86-64-202305021736` @@ -91,8 +91,8 @@ To use a GCP Marketplace image, specify the offer to use: * {oke}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-oke-413-x86-64-202305021736` -- <3> Specifies the cloud provider platform type. Do not change this value. -<4> Specifies the name of the GCP project that you use for your cluster. -<5> Specifies the GCP region for the cluster. +<4> Specifies the name of the {gcp-short} project that you use for your cluster. +<5> Specifies the {gcp-full} region for the cluster. <6> Specifies a single service account. Multiple service accounts are not supported. <7> Specifies the control plane user data secret. Do not change this value. <8> This parameter is configured in the failure domain, and is shown with an empty value here. If a value specified for this parameter differs from the value in the failure domain, the Operator overwrites it with the value in the failure domain. \ No newline at end of file diff --git a/modules/create-wif-cluster-cli.adoc b/modules/create-wif-cluster-cli.adoc index 98645645db97..911aeeda6195 100644 --- a/modules/create-wif-cluster-cli.adoc +++ b/modules/create-wif-cluster-cli.adoc @@ -7,7 +7,7 @@ [id="create-wif-cluster-cli_{context}"] = Creating a Workload Identity Federation cluster using the OCM CLI -You can create an {product-title} on {GCP} cluster with Workload Identity Federation (WIF) using the OpenShift Cluster Manager CLI (`ocm`) in interactive or non-interactive mode. +You can create an {product-title} on {gcp-full} cluster with Workload Identity Federation (WIF) using the OpenShift Cluster Manager CLI (`ocm`) in interactive or non-interactive mode. 
[NOTE] ==== @@ -49,7 +49,7 @@ $ ocm gcp create wif-config --name \ <1> --federated-project <4> ---- <1> Replace `` with the name of your WIF configuration. -<2> Replace `` with the ID of the {GCP} project where the WIF configuration will be implemented. +<2> Replace `` with the ID of the {gcp-full} project where the WIF configuration will be implemented. <3> Optional: Replace `` with the desired {product-title} version the wif-config will need to support. If you do not specify a version, the wif-config will support the latest {product-title} y-stream version as well as the last three supported {product-title} y-stream versions (beginning with version 4.17). <4> Optional: Replace `` with the ID of the dedicated project where the workload identity pools and providers will be created and managed. If the `--federated-project` flag is not specified, the workload identity pools and providers will be created and managed in the project specified by the `--project` flag. @@ -57,7 +57,7 @@ $ ocm gcp create wif-config --name \ <1> [NOTE] ===== -Using a dedicated project to create and manage workload identity pools and providers is recommended by {GCP}. +Using a dedicated project to create and manage workload identity pools and providers is recommended by {gcp-full}. Using a dedicated project helps you to establish centralized governance over the configuration of workload identity pools and providers, enforce uniform attribute mappings and conditions throughout all projects and applications, and ensure that only authorized identity providers can authenticate with WIF. For more information, see link:https://cloud.google.com/iam/docs/best-practices-for-using-workload-identity-federation#dedicated-project[Use a dedicated project to manage workload identity pools and providers]. @@ -98,7 +98,7 @@ $ ocm gcp create wif-config --name \ <1> --mode=manual ---- <1> Replace `` with the name of your WIF configuration. 
-<2> Replace `` with the ID of the {GCP} project where the WIF configuration will be implemented. +<2> Replace `` with the ID of the {gcp-full} project where the WIF configuration will be implemented. + Once the WIF is configured, the following service accounts, roles, and groups are created. + @@ -112,7 +112,7 @@ Red{nbsp}Hat custom roles are versioned with every OpenShift y-stream release, f |=== |Service Account/Group -|GCP pre-defined roles and Red Hat custom roles +|{gcp-full} pre-defined roles and Red Hat custom roles |osd-deployer @@ -168,7 +168,7 @@ In `interactive` mode, cluster attributes are displayed automatically as prompts In `non-interactive` mode, you specify the values for specific parameters within the command. -* Based on your mode preference, run one of the following commands to create an {product-title} on (GCP) cluster with WIF configuration: +* Based on your mode preference, run one of the following commands to create an {product-title} on {gcp-full} cluster with WIF configuration: ** Create a cluster in interactive mode by running the following command: + @@ -206,7 +206,7 @@ $ ocm create cluster \ <1> <2> Set value to `gcp`. <3> Set value to `true`. <4> Replace `` with the name of your WIF configuration. -<5> Replace `` with the {GCP} region where the new cluster will be deployed. +<5> Replace `` with the {gcp-full} region where the new cluster will be deployed. <6> Optional: The subscription billing model for the cluster. <7> Optional: If you provided a value of `marketplace-gcp` for the `subscription-type` parameter, `marketplace-gcp-terms` must be equal to `true`. <8> Optional: The desired {product-title} version. @@ -214,7 +214,7 @@ $ ocm create cluster \ <1> <10> Optional: Enable autoscaling of compute nodes. <11> Optional: Minimum number of compute nodes. <12> Optional: Maximum number of compute nodes. -<13> Optional: Secure Boot enables the use of Shielded VMs in the Google Cloud Platform. 
+<13> Optional: Secure Boot enables the use of Shielded VMs in {gcp-full}. <14> Optional: Replace `` with the name of the channel group you want to assign the cluster to. Channel group options include `stable` and `eus`. @@ -225,7 +225,7 @@ If an {product-title} version is specified, the version must also be supported b [IMPORTANT] ==== -If your cluster deployment fails during installation, certain resources created during the installation process are not automatically removed from your {GCP} account. To remove these resources from your GCP account, you must delete the failed cluster. +If your cluster deployment fails during installation, certain resources created during the installation process are not automatically removed from your {gcp-short} account. To remove these resources from your {gcp-short} account, you must delete the failed cluster. ==== [id="ocm-cli-list-wif-commands_{context}"] diff --git a/modules/create-wif-cluster-ocm.adoc b/modules/create-wif-cluster-ocm.adoc index f72b83db3025..fd48fc5af4ec 100644 --- a/modules/create-wif-cluster-ocm.adoc +++ b/modules/create-wif-cluster-ocm.adoc @@ -19,7 +19,7 @@ .. Select the *Customer cloud subscription* infrastructure type. .. Click *Next*. -. Select *Run on Google Cloud Platform*. +. Select *Run on {gcp-full}*. . Select *Workload Identity Federation* as the Authentication type. + [NOTE] @@ -78,7 +78,7 @@ Workload Identity Federation (WIF) is only supported on {product-title} version + [IMPORTANT] ==== -To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding GCP organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. 
+To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding {gcp-short} organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. ==== + [IMPORTANT] @@ -129,7 +129,7 @@ This step refers to labels within Kubernetes, not Google Cloud. For more informa . In the *Cluster privacy* dialog, select *Public* or *Private* to use either public or private API endpoints and application routes for your cluster. If you select *Private*, *Use Private Service Connect* is selected by default, and cannot be disabled. Private Service Connect (PSC) is Google Cloud’s security-enhanced networking feature. -. Optional: To install the cluster in an existing GCP Virtual Private Cloud (VPC): +. Optional: To install the cluster in an existing {gcp-short} Virtual Private Cloud (VPC): .. Select *Install into an existing VPC*. + [IMPORTANT] @@ -155,7 +155,7 @@ For more information about custom application ingress settings, click on the inf . Click *Next*. -. Optional: To install the cluster into a GCP Shared VPC, follow these steps. +. Optional: To install the cluster into a {gcp-short} Shared VPC, follow these steps. + -- include::snippets/install-cluster-in-vpc.adoc[] @@ -173,11 +173,11 @@ The VPC owner of the host project has 30 days to grant the listed permissions be For more information, see link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#set-up-shared-vpc[Enable a host project] and link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#migs-service-accounts[Provision Shared VPC]. ==== -.. Select *Install into GCP Shared VPC*. +.. Select *Install into {gcp-short} Shared VPC*. .. Specify the *Host project ID*. If the specified host project ID is incorrect, cluster creation fails. + -.. 
If you opted to install the cluster in an existing GCP VPC, provide your *Virtual Private Cloud (VPC) subnet settings* and select *Next*. +.. If you opted to install the cluster in an existing {gcp-short} VPC, provide your *Virtual Private Cloud (VPC) subnet settings* and select *Next*. You must have created the Cloud network address translation (NAT) and a Cloud router. See _Additional resources_ for information about Cloud NATs and Google VPCs. + [NOTE] @@ -241,7 +241,7 @@ By default, clusters are created with the delete protection feature disabled. [IMPORTANT] ==== -If your cluster deployment fails during installation, certain resources created during the installation process are not automatically removed from your {GCP} account. To remove these resources from your GCP account, you must delete the failed cluster. +If your cluster deployment fails during installation, certain resources created during the installation process are not automatically removed from your {gcp-short} account. To remove these resources from your {gcp-short} account, you must delete the failed cluster. ==== ifeval::["{context}" == "osd-creating-a-cluster-on-aws"] :!osd-on-aws: diff --git a/modules/creating-a-machine-pool-ocm.adoc index 4b08ff2a8350..af4ec738b10d 100644 --- a/modules/creating-a-machine-pool-ocm.adoc +++ b/modules/creating-a-machine-pool-ocm.adoc @@ -149,7 +149,7 @@ If you select *Use Amazon EC2 Spot Instances* for a machine pool, you cannot dis + endif::openshift-rosa-hcp[] ifdef::openshift-dedicated[] -. Optional: By default, {product-title} on {GCP} instances in the machine pools inherit the Shielded VM settings at the cluster level. You can override the cluster level Shielded VM settings at the machine pool level by selecting or clearing the *Enable Secure Boot support for Shielded VMs* checkbox. +. Optional: By default, {product-title} on {gcp-full} instances in the machine pools inherit the Shielded VM settings at the cluster level. 
You can override the cluster level Shielded VM settings at the machine pool level by selecting or clearing the *Enable Secure Boot support for Shielded VMs* checkbox. + [IMPORTANT] ==== @@ -158,7 +158,7 @@ Once a machine pool is created, the *Enable Secure Boot support for Shielded VMs + [IMPORTANT] ==== -*Enable Secure Boot support for Shielded VMs* is not supported for {product-title} on {GCP} clusters created using bare-metal instance types. For more information, see link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#limitations[Limitations] in the Google Cloud documentation. +*Enable Secure Boot support for Shielded VMs* is not supported for {product-title} on {gcp-full} clusters created using bare-metal instance types. For more information, see link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#limitations[Limitations] in the Google Cloud documentation. ==== endif::openshift-dedicated[] + diff --git a/modules/creating-recurring-etcd-backups.adoc b/modules/creating-recurring-etcd-backups.adoc index 7404588004ea..1c1004a63703 100644 --- a/modules/creating-recurring-etcd-backups.adoc +++ b/modules/creating-recurring-etcd-backups.adoc @@ -51,7 +51,7 @@ Each of the following providers require changes to the `accessModes` and `storag |`- ReadWriteMany` |`efs-sc` -|Google Cloud Platform +|{gcp-full} |`- ReadWriteMany` |`filestore-csi` diff --git a/modules/deleting-cluster.adoc b/modules/deleting-cluster.adoc index 7b7422a6b48f..c0381f87f031 100644 --- a/modules/deleting-cluster.adoc +++ b/modules/deleting-cluster.adoc @@ -25,5 +25,5 @@ You can delete your {product-title} cluster in {cluster-manager-first}. + [NOTE] ==== -If you delete a cluster that was installed into a GCP Shared VPC, inform the VPC owner of the host project to remove the IAM policy roles granted to the service account that was referenced during cluster creation. 
+If you delete a cluster that was installed into a {gcp-short} Shared VPC, inform the VPC owner of the host project to remove the IAM policy roles granted to the service account that was referenced during cluster creation. ==== \ No newline at end of file diff --git a/modules/distr-tracing-tempo-object-storage-setup-gcp-sts-install.adoc b/modules/distr-tracing-tempo-object-storage-setup-gcp-sts-install.adoc index b73f92426db9..be8726f4337c 100644 --- a/modules/distr-tracing-tempo-object-storage-setup-gcp-sts-install.adoc +++ b/modules/distr-tracing-tempo-object-storage-setup-gcp-sts-install.adoc @@ -17,7 +17,7 @@ include::snippets/technology-preview.adoc[leveloffset=+1] .Procedure -. Create a GCS bucket on the Google Cloud Platform (GCP). +. Create a GCS bucket on the {gcp-first}. . Create or reuse a service account with Google's Identity and Access Management (IAM): + @@ -29,10 +29,10 @@ SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts create The name of the service account on the GCP. -<2> The project ID of the service account on the GCP. +<1> The name of the service account on the {gcp-short}. +<2> The project ID of the service account on the {gcp-full}. -. Bind the required GCP roles to the created service account at the project level. You can do this by running the following command: +. Bind the required {gcp-short} roles to the created service account at the project level. You can do this by running the following command: + [source,terminal] ---- diff --git a/modules/gcp-limits.adoc b/modules/gcp-limits.adoc index 221c65e249c8..aa877852c949 100644 --- a/modules/gcp-limits.adoc +++ b/modules/gcp-limits.adoc @@ -3,10 +3,10 @@ // * osd_planning/gcp-ccs.adoc [id="gcp-limits_{context}"] -= GCP account limits += {gcp-short} account limits -The {product-title} cluster uses a number of Google Cloud Platform (GCP) components, but the default link:https://cloud.google.com/docs/quota[quotas] do not affect your ability to install an {product-title} cluster. 
+The {product-title} cluster uses a number of {gcp-first} components, but the default link:https://cloud.google.com/docs/quota[quotas] do not affect your ability to install an {product-title} cluster. A standard {product-title} cluster uses the following resources. Note that some resources are required only during the bootstrap process and are removed after the cluster deploys. diff --git a/modules/install-sno-installing-sno-on-gcp.adoc b/modules/install-sno-installing-sno-on-gcp.adoc index 2ca8ff93e7a4..2560adeb765d 100644 --- a/modules/install-sno-installing-sno-on-gcp.adoc +++ b/modules/install-sno-installing-sno-on-gcp.adoc @@ -5,10 +5,10 @@ :_mod-docs-content-type: CONCEPT [id="installing-sno-on-gcp_{context}"] ifndef::openshift-origin[] -= Installing {sno} on GCP += Installing {sno} on {gcp-full} endif::openshift-origin[] ifdef::openshift-origin[] -= Installing {sno-okd} on GCP += Installing {sno-okd} on {gcp-full} endif::openshift-origin[] -Installing a single node cluster on GCP requires installer-provisioned installation using the "Installing a cluster on GCP with customizations" procedure. +Installing a single node cluster on {gcp-short} requires installer-provisioned installation using the "Installing a cluster on {gcp-short} with customizations" procedure. 
diff --git a/modules/install-sno-supported-cloud-providers-for-single-node-openshift.adoc b/modules/install-sno-supported-cloud-providers-for-single-node-openshift.adoc index fb5a50aa1038..5d0452f6f22b 100644 --- a/modules/install-sno-supported-cloud-providers-for-single-node-openshift.adoc +++ b/modules/install-sno-supported-cloud-providers-for-single-node-openshift.adoc @@ -19,5 +19,5 @@ The following table contains a list of supported cloud providers and CPU archite |Cloud provider |CPU architecture |Amazon Web Service (AWS)|x86_64 and AArch64 |Microsoft Azure|x86_64 -|Google Cloud Platform (GCP) | x86_64 and AArch64 +|{gcp-first} | x86_64 and AArch64 |==== diff --git a/modules/installation-about-custom-gcp-vpc.adoc b/modules/installation-about-custom-gcp-vpc.adoc index 32b2d894068c..287eccd01458 100644 --- a/modules/installation-about-custom-gcp-vpc.adoc +++ b/modules/installation-about-custom-gcp-vpc.adoc @@ -6,9 +6,9 @@ [id="installation-about-custom-gcp-vpc_{context}"] = About using a custom VPC -In {product-title} {product-version}, you can deploy a cluster into an existing VPC in Google Cloud Platform (GCP). If you do, you must also use existing subnets within the VPC and routing rules. +In {product-title} {product-version}, you can deploy a cluster into an existing VPC in {gcp-first}. If you do, you must also use existing subnets within the VPC and routing rules. -By deploying {product-title} into an existing GCP VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. This is a good option to use if you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself. +By deploying {product-title} into an existing {gcp-short} VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. 
This is a good option to use if you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself. [id="installation-about-custom-gcp-vpcs-requirements_{context}"] == Requirements for using your VPC @@ -25,7 +25,7 @@ If you use a custom VPC, you must correctly configure it and its subnets for the Your VPC and subnets must meet the following characteristics: -* The VPC must be in the same GCP project that you deploy the {product-title} cluster to. +* The VPC must be in the same {gcp-short} project that you deploy the {product-title} cluster to. * To allow access to the internet from the control plane and compute machines, you must configure cloud NAT on the subnets to allow egress to it. These machines do not have a public address. Even if you do not require access to the internet, you must allow egress to the VPC network to obtain the installation program and images. Because multiple cloud NATs cannot be configured on the shared subnets, the installation program cannot configure it. To ensure that the subnets that you provide are suitable, the installation program confirms the following data: @@ -41,7 +41,7 @@ If you destroy a cluster that uses an existing VPC, the VPC is not deleted. Starting with {product-title} 4.3, you do not need all of the permissions that are required for an installation program-provisioned infrastructure cluster to deploy a cluster. This change mimics the division of permissions that you might have at your company: some individuals can create different resources in your clouds than others. For example, you might be able to create application-specific items, like instances, buckets, and load balancers, but not networking-related components such as VPCs, subnets, or Ingress rules. 
-The GCP credentials that you use when you create your cluster do not need the networking permissions that are required to make VPCs and core networking components within the VPC, such as subnets, routing tables, internet gateways, NAT, and VPN. You still need permission to make the application resources that the machines within the cluster require, such as load balancers, security groups, storage, and nodes. +The {gcp-short} credentials that you use when you create your cluster do not need the networking permissions that are required to make VPCs and core networking components within the VPC, such as subnets, routing tables, internet gateways, NAT, and VPN. You still need permission to make the application resources that the machines within the cluster require, such as load balancers, security groups, storage, and nodes. [id="installation-about-custom-gcp-vpcs-isolation_{context}"] == Isolation between clusters diff --git a/modules/installation-configuration-parameters.adoc b/modules/installation-configuration-parameters.adoc index 62a1c57428bb..708e2c53e2a9 100644 --- a/modules/installation-configuration-parameters.adoc +++ b/modules/installation-configuration-parameters.adoc @@ -880,7 +880,7 @@ ifdef::aws,gcp[] [NOTE] ==== ifdef::aws[If your AWS account has service control policies (SCP) enabled, you must configure the `credentialsMode` parameter to `Mint`, `Passthrough`, or `Manual`.] -ifdef::gcp[If you are installing on GCP into a shared virtual private cloud (VPC), `credentialsMode` must be set to `Passthrough` or `Manual`.] +ifdef::gcp[If you are installing on {gcp-short} into a shared virtual private cloud (VPC), `credentialsMode` must be set to `Passthrough` or `Manual`.] 
==== endif::aws,gcp[] ifdef::aws,gcp,azure[] @@ -2661,11 +2661,11 @@ endif::agent[] ifdef::gcp[] [id="installation-configuration-parameters-additional-gcp_{context}"] -== Additional Google Cloud Platform (GCP) configuration parameters +== Additional {gcp-first} configuration parameters -Additional GCP configuration parameters are described in the following table: +Additional {gcp-short} configuration parameters are described in the following table: -.Additional GCP parameters +.Additional {gcp-short} parameters [cols=".^l,.^a",options="header"] |==== |Parameter|Description @@ -2675,9 +2675,9 @@ Additional GCP configuration parameters are described in the following table: gcp: osImage: project: -|Optional. By default, the installation program downloads and installs the {op-system-first} image that is used to boot control plane machines. You can override the default behavior by specifying the location of a custom {op-system} image that the installation program is to use for control plane machines only. Control plane machines do not contribute to licensing costs when using the default image. But, if you apply a GCP Marketplace image for a control plane machine, usage costs do apply. +|Optional. By default, the installation program downloads and installs the {op-system-first} image that is used to boot control plane machines. You can override the default behavior by specifying the location of a custom {op-system} image that the installation program is to use for control plane machines only. Control plane machines do not contribute to licensing costs when using the default image. But, if you apply a {gcp-short} Marketplace image for a control plane machine, usage costs do apply. -*Value:* String. The name of GCP project where the image is located. +*Value:* String. The name of {gcp-short} project where the image is located. |controlPlane: platform: @@ -2695,7 +2695,7 @@ Additional GCP configuration parameters are described in the following table: project: |Optional. 
By default, the installation program downloads and installs the {op-system} image that is used to boot compute machines. You can override the default behavior by specifying the location of a custom {op-system} image that the installation program is to use for compute machines only. -*Value:* String. The name of GCP project where the image is located. +*Value:* String. The name of {gcp-short} project where the image is located. |compute: platform: @@ -2717,21 +2717,21 @@ Additional GCP configuration parameters are described in the following table: |platform: gcp: network: -|The name of the existing Virtual Private Cloud (VPC) where you want to deploy your cluster. If you want to deploy your cluster into a shared VPC, you must set `platform.gcp.networkProjectID` with the name of the GCP project that contains the shared VPC. +|The name of the existing Virtual Private Cloud (VPC) where you want to deploy your cluster. If you want to deploy your cluster into a shared VPC, you must set `platform.gcp.networkProjectID` with the name of the {gcp-short} project that contains the shared VPC. *Value:* String. |platform: gcp: networkProjectID: -|Optional. The name of the GCP project that contains the shared VPC where you want to deploy your cluster. +|Optional. The name of the {gcp-short} project that contains the shared VPC where you want to deploy your cluster. *Value:* String. |platform: gcp: projectID: -|The name of the GCP project where the installation program installs the cluster. +|The name of the {gcp-short} project where the installation program installs the cluster. *Value:* String. @@ -2764,7 +2764,7 @@ Additional GCP configuration parameters are described in the following table: |platform: gcp: region: -|The name of the GCP region that hosts your cluster. +|The name of the {gcp-short} region that hosts your cluster. *Value:* Any valid region name, such as `us-central1`. 
@@ -2788,11 +2788,11 @@ Additional GCP configuration parameters are described in the following table: zones: |The availability zones where the installation program creates machines. -*Value:* A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a +*Value:* A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[{gcp-full} availability zones], such as `us-central1-a`, in a link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. [IMPORTANT] ==== -When running your cluster on GCP 64-bit ARM infrastructures, ensure that you use a zone where Ampere Altra Arm CPU's are available. You can find which zones are compatible with 64-bit ARM processors in the "GCP availability zones" link. +When running your cluster on {gcp-short} 64-bit ARM infrastructures, ensure that you use a zone where Ampere Altra Arm CPU's are available. You can find which zones are compatible with 64-bit ARM processors in the "{gcp-full} availability zones" link. ==== |platform: @@ -2809,7 +2809,7 @@ When running your cluster on GCP 64-bit ARM infrastructures, ensure that you use defaultMachinePlatform: osDisk: diskType: -|The link:https://cloud.google.com/compute/docs/disks#disk-types[GCP disk type]. +|The link:https://cloud.google.com/compute/docs/disks#disk-types[{gcp-full} disk type]. *Value:* The default disk type for all machines. Valid values are `pd-balanced`, `pd-ssd`, `pd-standard`, or `hyperdisk-balanced`. The default value is `pd-ssd`. Control plane machines cannot use the `pd-standard` disk type, so if you specify `pd-standard` as the default machine platform disk type, you must specify a different disk type using the `controlPlane.platform.gcp.osDisk.diskType` parameter. @@ -2820,7 +2820,7 @@ When running your cluster on GCP 64-bit ARM infrastructures, ensure that you use project: |Optional. 
By default, the installation program downloads and installs the {op-system} image that is used to boot control plane and compute machines. You can override the default behavior by specifying the location of a custom {op-system} image that the installation program is to use for both types of machines. -*Value:* String. The name of GCP project where the image is located. +*Value:* String. The name of {gcp-short} project where the image is located. |platform: gcp: @@ -2843,9 +2843,9 @@ When running your cluster on GCP 64-bit ARM infrastructures, ensure that you use gcp: defaultMachinePlatform: type: -|The link:https://cloud.google.com/compute/docs/machine-types[GCP machine type] for control plane and compute machines. +|The link:https://cloud.google.com/compute/docs/machine-types[{gcp-full} machine type] for control plane and compute machines. -*Value:* The GCP machine type, for example `n1-standard-4`. +*Value:* The {gcp-short} machine type, for example `n1-standard-4`. |platform: gcp: @@ -2876,9 +2876,9 @@ When running your cluster on GCP 64-bit ARM infrastructures, ensure that you use encryptionKey: kmsKey: location: -|The link:https://cloud.google.com/kms/docs/locations[GCP location] in which the KMS key ring exists. +|The link:https://cloud.google.com/kms/docs/locations[{gcp-full} location] in which the KMS key ring exists. -*Value:* The GCP location. +*Value:* The {gcp-short} location. |platform: gcp: @@ -2889,7 +2889,7 @@ When running your cluster on GCP 64-bit ARM infrastructures, ensure that you use projectID: |The ID of the project in which the KMS key ring exists. This value defaults to the value of the `platform.gcp.projectID` parameter if it is not set. -*Value:* The GCP project ID. +*Value:* The {gcp-short} project ID. 
|platform: gcp: @@ -2897,9 +2897,9 @@ When running your cluster on GCP 64-bit ARM infrastructures, ensure that you use osDisk: encryptionKey: kmsKeyServiceAccount: -|The GCP service account used for the encryption request for control plane and compute machines. If absent, the Compute Engine default service account is used. For more information about GCP service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. +|The {gcp-short} service account used for the encryption request for control plane and compute machines. If absent, the Compute Engine default service account is used. For more information about {gcp-short} service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. -*Value:* The GCP service account email, for example `@.iam.gserviceaccount.com`. +*Value:* The {gcp-short} service account email, for example `@.iam.gserviceaccount.com`. |platform: gcp: @@ -2974,9 +2974,9 @@ If you specify any value other than `Disabled`, you must set `platform.gcp.defau encryptionKey: kmsKey: location: -|For control plane machines, the GCP location in which the key ring exists. For more information about KMS locations, see Google's documentation on link:https://cloud.google.com/kms/docs/locations[Cloud KMS locations]. +|For control plane machines, the {gcp-short} location in which the key ring exists. For more information about KMS locations, see Google's documentation on link:https://cloud.google.com/kms/docs/locations[Cloud KMS locations]. -*Value:* The GCP location for the key ring. +*Value:* The {gcp-short} location for the key ring. |controlPlane: platform: @@ -2987,7 +2987,7 @@ If you specify any value other than `Disabled`, you must set `platform.gcp.defau projectID: |For control plane machines, the ID of the project in which the KMS key ring exists. 
This value defaults to the VM project ID if not set. -*Value:* The GCP project ID. +*Value:* The {gcp-short} project ID. |controlPlane: platform: @@ -2995,9 +2995,9 @@ If you specify any value other than `Disabled`, you must set `platform.gcp.defau osDisk: encryptionKey: kmsKeyServiceAccount: -|The GCP service account used for the encryption request for control plane machines. If absent, the Compute Engine default service account is used. For more information about GCP service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. +|The {gcp-short} service account used for the encryption request for control plane machines. If absent, the Compute Engine default service account is used. For more information about {gcp-short} service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. -*Value:* The GCP service account email, for example `@.iam.gserviceaccount.com`. +*Value:* The {gcp-short} service account email, for example `@.iam.gserviceaccount.com`. |controlPlane: platform: @@ -3013,7 +3013,7 @@ If you specify any value other than `Disabled`, you must set `platform.gcp.defau gcp: osDisk: diskType: -|The link:https://cloud.google.com/compute/docs/disks#disk-types[GCP disk type] for control plane machines. +|The link:https://cloud.google.com/compute/docs/disks#disk-types[{gcp-full} disk type] for control plane machines. *Value:* Valid values are `pd-balanced`, `pd-ssd`, or `hyperdisk-balanced`. The default value is `pd-ssd`. @@ -3029,9 +3029,9 @@ If you specify any value other than `Disabled`, you must set `platform.gcp.defau platform: gcp: type: -|The link:https://cloud.google.com/compute/docs/machine-types[GCP machine type] for control plane machines. If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.type` parameter. 
+|The link:https://cloud.google.com/compute/docs/machine-types[{gcp-full} machine type] for control plane machines. If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.type` parameter. -*Value:* The GCP machine type, for example `n1-standard-4`. +*Value:* The {gcp-short} machine type, for example `n1-standard-4`. |controlPlane: platform: @@ -3039,11 +3039,11 @@ If you specify any value other than `Disabled`, you must set `platform.gcp.defau zones: |The availability zones where the installation program creates control plane machines. -*Value:* A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a +*Value:* A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[{gcp-full} availability zones], such as `us-central1-a`, in a link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. [IMPORTANT] ==== -When running your cluster on GCP 64-bit ARM infrastructures, ensure that you use a zone where Ampere Altra Arm CPU's are available. You can find which zones are compatible with 64-bit ARM processors in the "GCP availability zones" link. +When running your cluster on {gcp-short} 64-bit ARM infrastructures, ensure that you use a zone where Ampere Altra Arm CPU's are available. You can find which zones are compatible with 64-bit ARM processors in the "{gcp-full} availability zones" link. ==== |controlPlane: @@ -3131,9 +3131,9 @@ In the case of shared VPC installations, when the service account is not provide encryptionKey: kmsKey: location: -|For compute machines, the GCP location in which the key ring exists. For more information about KMS locations, see Google's documentation on link:https://cloud.google.com/kms/docs/locations[Cloud KMS locations]. +|For compute machines, the {gcp-short} location in which the key ring exists. 
For more information about KMS locations, see Google's documentation on link:https://cloud.google.com/kms/docs/locations[Cloud KMS locations]. -*Value:* The GCP location for the key ring. +*Value:* The {gcp-short} location for the key ring. |compute: platform: @@ -3144,7 +3144,7 @@ In the case of shared VPC installations, when the service account is not provide projectID: |For compute machines, the ID of the project in which the KMS key ring exists. This value defaults to the VM project ID if not set. -*Value:* The GCP project ID. +*Value:* The {gcp-short} project ID. |compute: platform: @@ -3152,9 +3152,9 @@ In the case of shared VPC installations, when the service account is not provide osDisk: encryptionKey: kmsKeyServiceAccount: -|The GCP service account used for the encryption request for compute machines. If this value is not set, the Compute Engine default service account is used. For more information about GCP service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. +|The {gcp-short} service account used for the encryption request for compute machines. If this value is not set, the Compute Engine default service account is used. For more information about {gcp-short} service accounts, see Google's documentation on link:https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account[service accounts]. -*Value:* The GCP service account email, for example `@.iam.gserviceaccount.com`. +*Value:* The {gcp-short} service account email, for example `@.iam.gserviceaccount.com`. |compute: platform: @@ -3170,7 +3170,7 @@ In the case of shared VPC installations, when the service account is not provide gcp: osDisk: diskType: -|The link:https://cloud.google.com/compute/docs/disks#disk-types[GCP disk type] for compute machines. +|The link:https://cloud.google.com/compute/docs/disks#disk-types[{gcp-full} disk type] for compute machines. 
*Value:* Valid values are `pd-balanced`, `pd-ssd`, `pd-standard`, or `hyperdisk-balanced`. The default value is `pd-ssd`. @@ -3186,9 +3186,9 @@ In the case of shared VPC installations, when the service account is not provide platform: gcp: type: -|The link:https://cloud.google.com/compute/docs/machine-types[GCP machine type] for compute machines. If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.type` parameter. +|The link:https://cloud.google.com/compute/docs/machine-types[{gcp-full} machine type] for compute machines. If set, this parameter overrides the `platform.gcp.defaultMachinePlatform.type` parameter. -*Value:* The GCP machine type, for example `n1-standard-4`. +*Value:* The {gcp-short} machine type, for example `n1-standard-4`. |compute: platform: @@ -3196,11 +3196,11 @@ In the case of shared VPC installations, when the service account is not provide zones: |The availability zones where the installation program creates compute machines. -*Value:* A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[GCP availability zones], such as `us-central1-a`, in a +*Value:* A list of valid link:https://cloud.google.com/compute/docs/regions-zones#available[{gcp-full} availability zones], such as `us-central1-a`, in a link:https://yaml.org/spec/1.2/spec.html#sequence//[YAML sequence]. [IMPORTANT] ==== -When running your cluster on GCP 64-bit ARM infrastructures, ensure that you use a zone where Ampere Altra Arm CPU's are available. You can find which zones are compatible with 64-bit ARM processors in the "GCP availability zones" link. +When running your cluster on {gcp-short} 64-bit ARM infrastructures, ensure that you use a zone where Ampere Altra Arm CPU's are available. You can find which zones are compatible with 64-bit ARM processors in the "{gcp-full} availability zones" link. 
==== |compute: diff --git a/modules/installation-configure-proxy.adoc b/modules/installation-configure-proxy.adoc index 9a0bd6e43435..1af2e887b805 100644 --- a/modules/installation-configure-proxy.adoc +++ b/modules/installation-configure-proxy.adoc @@ -152,7 +152,7 @@ endif::gcp[] ==== The `Proxy` object `status.noProxy` field is populated with the values of the `networking.machineNetwork[].cidr`, `networking.clusterNetwork[].cidr`, and `networking.serviceNetwork[]` fields from your installation configuration. -For installations on Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, and {rh-openstack-first}, the `Proxy` object `status.noProxy` field is also populated with the instance metadata endpoint (`169.254.169.254`). +For installations on Amazon Web Services (AWS), {gcp-first}, Microsoft Azure, and {rh-openstack-first}, the `Proxy` object `status.noProxy` field is also populated with the instance metadata endpoint (`169.254.169.254`). ==== .Procedure diff --git a/modules/installation-creating-gcp-bootstrap.adoc b/modules/installation-creating-gcp-bootstrap.adoc index b11de7393d4e..f2712685dfc9 100644 --- a/modules/installation-creating-gcp-bootstrap.adoc +++ b/modules/installation-creating-gcp-bootstrap.adoc @@ -10,9 +10,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-bootstrap_{context}"] -= Creating the bootstrap machine in GCP += Creating the bootstrap machine in {gcp-full} -You must create the bootstrap machine in Google Cloud Platform (GCP) to use during +You must create the bootstrap machine in {gcp-first} to use during {product-title} cluster initialization. One way to create this machine is to modify the provided Deployment Manager template. @@ -26,7 +26,7 @@ have to contact Red Hat support with your installation logs. .Prerequisites -* Ensure you defined the variables in the _Exporting common variables_ and _Creating load balancers in GCP_ sections. 
+* Ensure you defined the variables in the _Exporting common variables_ and _Creating load balancers in {gcp-full}_ sections. * Ensure you installed pyOpenSSL. .Procedure diff --git a/modules/installation-creating-gcp-control-plane.adoc b/modules/installation-creating-gcp-control-plane.adoc index db27eb1cd879..fe91d017520c 100644 --- a/modules/installation-creating-gcp-control-plane.adoc +++ b/modules/installation-creating-gcp-control-plane.adoc @@ -10,9 +10,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-control-plane_{context}"] -= Creating the control plane machines in GCP += Creating the control plane machines in {gcp-full} -You must create the control plane machines in Google Cloud Platform (GCP) for +You must create the control plane machines in {gcp-first} for your cluster to use. One way to create these machines is to modify the provided Deployment Manager template. @@ -26,7 +26,7 @@ might have to contact Red Hat support with your installation logs. .Prerequisites -* Ensure you defined the variables in the _Exporting common variables_, _Creating load balancers in GCP_, _Creating IAM roles in GCP_, and _Creating the bootstrap machine in GCP_ sections. +* Ensure you defined the variables in the _Exporting common variables_, _Creating load balancers in {gcp-full}_, _Creating IAM roles in {gcp-full}_, and _Creating the bootstrap machine in {gcp-full}_ sections. * Create the bootstrap machine. 
.Procedure diff --git a/modules/installation-creating-gcp-firewall-rules-vpc.adoc b/modules/installation-creating-gcp-firewall-rules-vpc.adoc index 8413256ab5ee..efa288ae1e81 100644 --- a/modules/installation-creating-gcp-firewall-rules-vpc.adoc +++ b/modules/installation-creating-gcp-firewall-rules-vpc.adoc @@ -9,15 +9,15 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-firewall-rules-vpc_{context}"] -= Creating firewall rules in GCP += Creating firewall rules in {gcp-full} -You must create firewall rules in Google Cloud Platform (GCP) for your +You must create firewall rules in {gcp-first} for your {product-title} cluster to use. One way to create these components is to modify the provided Deployment Manager template. [NOTE] ==== -If you do not use the provided Deployment Manager template to create your GCP +If you do not use the provided Deployment Manager template to create your {gcp-full} infrastructure, you must review the provided information and manually create the infrastructure. If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. @@ -25,7 +25,7 @@ have to contact Red Hat support with your installation logs. .Prerequisites -* Ensure you defined the variables in the _Exporting common variables_ and _Creating load balancers in GCP_ sections. +* Ensure you defined the variables in the _Exporting common variables_ and _Creating load balancers in {gcp-full}_ sections. 
.Procedure diff --git a/modules/installation-creating-gcp-iam-shared-vpc.adoc b/modules/installation-creating-gcp-iam-shared-vpc.adoc index 77f214beb91c..5a027efdb5da 100644 --- a/modules/installation-creating-gcp-iam-shared-vpc.adoc +++ b/modules/installation-creating-gcp-iam-shared-vpc.adoc @@ -9,9 +9,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-iam-shared-vpc_{context}"] -= Creating IAM roles in GCP += Creating IAM roles in {gcp-full} -You must create IAM roles in Google Cloud Platform (GCP) for your +You must create IAM roles in {gcp-first} for your {product-title} cluster to use. One way to create these components is to modify the provided Deployment Manager template. diff --git a/modules/installation-creating-gcp-lb.adoc b/modules/installation-creating-gcp-lb.adoc index d138fcb66784..5315fbfdc5ee 100644 --- a/modules/installation-creating-gcp-lb.adoc +++ b/modules/installation-creating-gcp-lb.adoc @@ -9,15 +9,15 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-lb_{context}"] -= Creating load balancers in GCP += Creating load balancers in {gcp-full} -You must configure load balancers in Google Cloud Platform (GCP) for your +You must configure load balancers in {gcp-first} for your {product-title} cluster to use. One way to create these components is to modify the provided Deployment Manager template. [NOTE] ==== -If you do not use the provided Deployment Manager template to create your GCP +If you do not use the provided Deployment Manager template to create your {gcp-full} infrastructure, you must review the provided information and manually create the infrastructure. If your cluster does not initialize correctly, you might have to contact Red Hat support with your installation logs. 
diff --git a/modules/installation-creating-gcp-private-dns.adoc b/modules/installation-creating-gcp-private-dns.adoc index 1b2c5acb6120..4f9baeceedc6 100644 --- a/modules/installation-creating-gcp-private-dns.adoc +++ b/modules/installation-creating-gcp-private-dns.adoc @@ -9,9 +9,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-private-dns_{context}"] -= Creating a private DNS zone in GCP += Creating a private DNS zone in {gcp-full} -You must configure a private DNS zone in Google Cloud Platform (GCP) for your +You must configure a private DNS zone in {gcp-first} for your {product-title} cluster to use. One way to create this component is to modify the provided Deployment Manager template. @@ -25,7 +25,7 @@ have to contact Red Hat support with your installation logs. .Prerequisites -* Ensure you defined the variables in the _Exporting common variables_ and _Creating load balancers in GCP_ sections. +* Ensure you defined the variables in the _Exporting common variables_ and _Creating load balancers in {gcp-full}_ sections. .Procedure diff --git a/modules/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc b/modules/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc index dfab22252e5d..90241e5b1b12 100644 --- a/modules/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc +++ b/modules/installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-shared-vpc-cluster-wide-firewall-rules_{context}"] -= Creating cluster-wide firewall rules for a shared VPC in GCP += Creating cluster-wide firewall rules for a shared VPC in {gcp-full} You can create cluster-wide firewall rules to allow the access that the {product-title} cluster requires. 
@@ -16,7 +16,7 @@ If you do not choose to create firewall rules based on cluster events, you must .Prerequisites * You exported the variables that the Deployment Manager templates require to deploy your cluster. -* You created the networking and load balancing components in GCP that your cluster requires. +* You created the networking and load balancing components in {gcp-short} that your cluster requires. .Procedure diff --git a/modules/installation-creating-gcp-vpc.adoc b/modules/installation-creating-gcp-vpc.adoc index 1d52068923b2..39a01112bb95 100644 --- a/modules/installation-creating-gcp-vpc.adoc +++ b/modules/installation-creating-gcp-vpc.adoc @@ -10,9 +10,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-vpc_{context}"] -= Creating a VPC in GCP += Creating a VPC in {gcp-full} -You must create a VPC in Google Cloud Platform (GCP) for your {product-title} +You must create a VPC in {gcp-first} for your {product-title} cluster to use. You can customize the VPC to meet your requirements. One way to create the VPC is to modify the provided Deployment Manager template. @@ -141,7 +141,7 @@ $ export HOST_PROJECT_CONTROL_SUBNET= $ export HOST_PROJECT_COMPUTE_SUBNET= ---- -. Set up the shared VPC. See link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#setting_up[Setting up Shared VPC] in the GCP documentation. +. Set up the shared VPC. See link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#setting_up[Setting up Shared VPC] in the {gcp-short} documentation. 
endif::shared-vpc[] ifeval::["{context}" == "installing-gcp-user-infra-vpc"] diff --git a/modules/installation-creating-gcp-worker.adoc b/modules/installation-creating-gcp-worker.adoc index b2192eb72ffb..ceb205b0b030 100644 --- a/modules/installation-creating-gcp-worker.adoc +++ b/modules/installation-creating-gcp-worker.adoc @@ -12,9 +12,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-creating-gcp-worker_{context}"] -= Creating additional worker machines in GCP += Creating additional worker machines in {gcp-full} -You can create worker machines in Google Cloud Platform (GCP) for your cluster +You can create worker machines in {gcp-first} for your cluster to use by launching individual instances discretely or by automated processes outside the cluster, such as auto scaling groups. You can also take advantage of the built-in cluster scaling mechanisms and the machine API in {product-title}. @@ -40,7 +40,7 @@ have to contact Red Hat support with your installation logs. .Prerequisites -* Ensure you defined the variables in the _Exporting common variables_, _Creating load balancers in GCP_, and _Creating the bootstrap machine in GCP_ sections. +* Ensure you defined the variables in the _Exporting common variables_, _Creating load balancers in {gcp-full}_, and _Creating the bootstrap machine in {gcp-full}_ sections. * Create the bootstrap machine. * Create the control plane machines. @@ -135,7 +135,7 @@ $ gcloud deployment-manager deployments create ${INFRA_ID}-worker --config 06_wo [.small] -- -1. To use a GCP Marketplace image, specify the offer to use: +1. 
To use a {gcp-short} Marketplace image, specify the offer to use: ** {product-title}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-413-x86-64-202305021736` ** {opp}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-opp-413-x86-64-202305021736` ** {oke}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-oke-413-x86-64-202305021736` diff --git a/modules/installation-custom-gcp-vpc.adoc b/modules/installation-custom-gcp-vpc.adoc index 32c7fa90c054..ff30a48c187f 100644 --- a/modules/installation-custom-gcp-vpc.adoc +++ b/modules/installation-custom-gcp-vpc.adoc @@ -6,7 +6,7 @@ [id="installation-custom-gcp-vpc_{context}"] = About using a custom VPC -In {product-title} {product-version}, you can deploy a cluster into existing subnets in an existing Virtual Private Cloud (VPC) in Google Cloud Platform (GCP). By deploying {product-title} into an existing GCP VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. If you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself, use this installation option. You must configure networking for the subnets. +In {product-title} {product-version}, you can deploy a cluster into existing subnets in an existing Virtual Private Cloud (VPC) in {gcp-first}. By deploying {product-title} into an existing {gcp-full} VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. If you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself, use this installation option. You must configure networking for the subnets. 
[id="installation-custom-gcp-vpc-requirements_{context}"] == Requirements for using your VPC diff --git a/modules/installation-extracting-infraid.adoc b/modules/installation-extracting-infraid.adoc index dd4f8d62b948..82ed4b52e2a0 100644 --- a/modules/installation-extracting-infraid.adoc +++ b/modules/installation-extracting-infraid.adoc @@ -29,19 +29,19 @@ ifeval::["{context}" == "installing-azure-user-infra"] :azure: endif::[] ifeval::["{context}" == "installing-gcp-user-infra"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :cp-template: Deployment Manager :gcp: endif::[] ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :cp-template: Deployment Manager :gcp: endif::[] ifeval::["{context}" == "installing-restricted-networks-gcp"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :cp-template: Deployment Manager :gcp: @@ -138,7 +138,7 @@ ifeval::["{context}" == "installing-gcp-user-infra"] :!gcp: endif::[] ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!cp-first: Google Cloud Platform +:!cp-first: Google Cloud :!cp: GCP :!cp-template: Deployment Manager :!gcp: diff --git a/modules/installation-gcp-config-yaml.adoc b/modules/installation-gcp-config-yaml.adoc index 0329074211b8..8f89da023bea 100644 --- a/modules/installation-gcp-config-yaml.adoc +++ b/modules/installation-gcp-config-yaml.adoc @@ -24,7 +24,7 @@ ifeval::["{context}" == "installing-restricted-networks-gcp-installer-provisione endif::[] [id="installation-gcp-config-yaml_{context}"] -= Sample customized install-config.yaml file for GCP += Sample customized install-config.yaml file for {gcp-full} You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. 
@@ -200,7 +200,7 @@ endif::restricted[] ==== If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger machine types, such as `n1-standard-8`, for your machines if you disable simultaneous multithreading. ==== -<6> Optional: The custom encryption key section to encrypt both virtual machines and persistent volumes. Your default compute service account must have the permissions granted to use your KMS key and have the correct IAM role assigned. The default service account name follows the `service-@compute-system.iam.gserviceaccount.com` pattern. For more information about granting the correct permissions for your service account, see "Machine management" -> "Creating compute machine sets" -> "Creating a compute machine set on GCP". +<6> Optional: The custom encryption key section to encrypt both virtual machines and persistent volumes. Your default compute service account must have the permissions granted to use your KMS key and have the correct IAM role assigned. The default service account name follows the `service-@compute-system.iam.gserviceaccount.com` pattern. For more information about granting the correct permissions for your service account, see "Machine management" -> "Creating compute machine sets" -> "Creating a compute machine set on {gcp-full}". <7> Optional: A set of network tags to apply to the control plane or compute machine sets. The `platform.gcp.defaultMachinePlatform.tags` parameter will apply to both control plane and compute machines. If the `compute.platform.gcp.tags` or `controlPlane.platform.gcp.tags` parameters are set, they override the `platform.gcp.defaultMachinePlatform.tags` parameter. <8> Optional: A custom {op-system-first} that should be used to boot control plane and compute machines. The `project` and `name` parameters under `platform.gcp.defaultMachinePlatform.osImage` apply to both control plane and compute machines. 
If the `project` and `name` parameters under `controlPlane.platform.gcp.osImage` or `compute.platform.gcp.osImage` are set, they override the `platform.gcp.defaultMachinePlatform.osImage` parameters. <9> The cluster network plugin to install. The default value `OVNKubernetes` is the only supported value. diff --git a/modules/installation-gcp-dns.adoc b/modules/installation-gcp-dns.adoc index 75abc245fd33..0defb9183058 100644 --- a/modules/installation-gcp-dns.adoc +++ b/modules/installation-gcp-dns.adoc @@ -10,9 +10,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-gcp-dns_{context}"] -= Configuring DNS for GCP += Configuring DNS for {gcp-full} -To install {product-title}, the Google Cloud Platform (GCP) account you use must +To install {product-title}, the {gcp-first} account you use must have a dedicated public hosted zone ifndef::user-infra-vpc[] in the same project that you host the {product-title} cluster. @@ -27,7 +27,7 @@ connections to the cluster. .Procedure . Identify your domain, or subdomain, and registrar. You can transfer an existing domain and -registrar or obtain a new one through GCP or another source. +registrar or obtain a new one through {gcp-short} or another source. + [NOTE] ==== @@ -36,16 +36,16 @@ changes to propagate. For more information about purchasing domains through Google, see link:https://domains.google/[Google Domains]. ==== -. Create a public hosted zone for your domain or subdomain in your GCP project. See +. Create a public hosted zone for your domain or subdomain in your {gcp-short} project. See link:https://cloud.google.com/dns/zones/#creating_public_zones[Creating public zones] -in the GCP documentation. +in the {gcp-short} documentation. + Use an appropriate root domain, such as `openshiftcorp.com`, or subdomain, such as `clusters.openshiftcorp.com`. . Extract the new authoritative name servers from the hosted zone records. 
See link:https://cloud.google.com/dns/docs/update-name-servers#look_up_your_name_servers[Look up your Cloud DNS name servers] -in the GCP documentation. +in the {gcp-short} documentation. + You typically have four name servers. @@ -54,7 +54,7 @@ uses. For example, if you registered your domain to Google Domains, see the following topic in the Google Domains Help: link:https://support.google.com/domains/answer/3290309?hl=en[How to switch to custom name servers]. -. If you migrated your root domain to Google Cloud DNS, migrate your DNS records. See link:https://cloud.google.com/dns/docs/migrating[Migrating to Cloud DNS] in the GCP documentation. +. If you migrated your root domain to Google Cloud DNS, migrate your DNS records. See link:https://cloud.google.com/dns/docs/migrating[Migrating to Cloud DNS] in the {gcp-short} documentation. . If you use a subdomain, follow your company's procedures to add its delegation records to the parent domain. This process might include a request to your company's IT department or the division that controls the root domain and DNS services for your company. diff --git a/modules/installation-gcp-enabling-api-services.adoc b/modules/installation-gcp-enabling-api-services.adoc index a8621b359126..b32e3e0210aa 100644 --- a/modules/installation-gcp-enabling-api-services.adoc +++ b/modules/installation-gcp-enabling-api-services.adoc @@ -16,9 +16,9 @@ endif::[] :_mod-docs-content-type: PROCEDURE [id="installation-gcp-enabling-api-services_{context}"] -= Enabling API services in GCP += Enabling API services in {gcp-full} -Your Google Cloud Platform (GCP) project requires access to several API services +Your {gcp-first} project requires access to several API services to complete {product-title} installation. .Prerequisites @@ -30,7 +30,7 @@ to complete {product-title} installation. * Enable the following required API services in the project that hosts your cluster. 
You may also enable optional API services which are not required for installation. See link:https://cloud.google.com/service-usage/docs/enable-disable#enabling[Enabling services] -in the GCP documentation. +in the {gcp-short} documentation. + .Required API services [cols="2a,3a",options="header"] diff --git a/modules/installation-gcp-install-cli.adoc b/modules/installation-gcp-install-cli.adoc index 3e1c22aff677..bdaa9d0b172e 100644 --- a/modules/installation-gcp-install-cli.adoc +++ b/modules/installation-gcp-install-cli.adoc @@ -5,10 +5,10 @@ :_mod-docs-content-type: PROCEDURE [id="installation-gcp-install-cli_{context}"] -= Installing and configuring CLI tools for GCP += Installing and configuring CLI tools for {gcp-full} -To install {product-title} on Google Cloud Platform (GCP) using user-provisioned -infrastructure, you must install and configure the CLI tools for GCP. +To install {product-title} on {gcp-full} using user-provisioned +infrastructure, you must install and configure the CLI tools for {gcp-full}. .Prerequisites @@ -25,8 +25,8 @@ infrastructure, you must install and configure the CLI tools for GCP. -- + See link:https://cloud.google.com/sdk/docs/#install_the_latest_cloud_tools_version_cloudsdk_current_version[Install the latest Cloud SDK version] -in the GCP documentation. +in the {gcp-short} documentation. . Authenticate using the `gcloud` tool with your configured service account. + -See link:https://cloud.google.com/sdk/docs/authorizing#authorizing_with_a_service_account[Authorizing with a service account] in the GCP documentation. +See link:https://cloud.google.com/sdk/docs/authorizing#authorizing_with_a_service_account[Authorizing with a service account] in the {gcp-short} documentation. 
diff --git a/modules/installation-gcp-limits.adoc b/modules/installation-gcp-limits.adoc index 52eca964f3fb..d55372779b24 100644 --- a/modules/installation-gcp-limits.adoc +++ b/modules/installation-gcp-limits.adoc @@ -16,9 +16,9 @@ endif::[] :_mod-docs-content-type: REFERENCE [id="installation-gcp-limits_{context}"] -= GCP account limits += {gcp-short} account limits -The {product-title} cluster uses a number of Google Cloud Platform (GCP) +The {product-title} cluster uses a number of {gcp-first} components, but the default link:https://cloud.google.com/docs/quota[Quotas] do not affect your ability to install a default {product-title} cluster. @@ -27,7 +27,7 @@ A default cluster, which contains three compute and three control plane machines uses the following resources. Note that some resources are required only during the bootstrap process and are removed after the cluster deploys. -.GCP resources used in a default cluster +.{gcp-full} resources used in a default cluster [cols="2a,2a,2a,2a,2a",options="header"] |=== diff --git a/modules/installation-gcp-marketplace.adoc b/modules/installation-gcp-marketplace.adoc index 16e56edad6b6..aba3c85ae670 100644 --- a/modules/installation-gcp-marketplace.adoc +++ b/modules/installation-gcp-marketplace.adoc @@ -4,14 +4,14 @@ :_mod-docs-content-type: PROCEDURE [id="installation-gcp-marketplace_{context}"] -= Using the GCP Marketplace offering += Using the {gcp-short} Marketplace offering -Using the GCP Marketplace offering lets you deploy an {product-title} cluster, which is billed on pay-per-use basis (hourly, per core) through GCP, while still being supported directly by Red{nbsp}Hat. +Using the {gcp-short} Marketplace offering lets you deploy an {product-title} cluster, which is billed on pay-per-use basis (hourly, per core) through {gcp-short}, while still being supported directly by Red{nbsp}Hat. -By default, the installation program downloads and installs the {op-system-first} image that is used to deploy compute machines. 
To deploy an {product-title} cluster using an {op-system} image from the GCP Marketplace, override the default behavior by modifying the `install-config.yaml` file to reference the location of GCP Marketplace offer. +By default, the installation program downloads and installs the {op-system-first} image that is used to deploy compute machines. To deploy an {product-title} cluster using an {op-system} image from the {gcp-short} Marketplace, override the default behavior by modifying the `install-config.yaml` file to reference the location of {gcp-short} Marketplace offer. -:platform-abbreviation: a GCP -:platform-abbreviation-short: GCP +:platform-abbreviation: a {gcp-full} +:platform-abbreviation-short: {gcp-full} [NOTE] ==== @@ -24,7 +24,7 @@ include::snippets/installation-marketplace-note.adoc[] .Procedure -. Edit the `compute.platform.gcp.osImage` parameters to specify the location of the GCP Marketplace image: +. Edit the `compute.platform.gcp.osImage` parameters to specify the location of the {gcp-short} Marketplace image: ** Set the `project` parameter to `redhat-marketplace-public` ** Set the `name` parameter to one of the following offers: + @@ -33,7 +33,7 @@ include::snippets/installation-marketplace-note.adoc[] {oke}:: `redhat-coreos-oke-413-x86-64-202305021736` . Save the file and reference it when deploying the cluster. 
-.Sample `install-config.yaml` file that specifies a GCP Marketplace image for compute machines +.Sample `install-config.yaml` file that specifies a {gcp-short} Marketplace image for compute machines [source,yaml] ---- apiVersion: v1 diff --git a/modules/installation-gcp-permissions.adoc b/modules/installation-gcp-permissions.adoc index e0f52654daa1..58b2c4f48774 100644 --- a/modules/installation-gcp-permissions.adoc +++ b/modules/installation-gcp-permissions.adoc @@ -16,7 +16,7 @@ endif::[] :_mod-docs-content-type: CONCEPT [id="installation-gcp-permissions_{context}"] -= Required GCP roles += Required {gcp-short} roles When you attach the `Owner` role to the service account that you create, you grant that service account all permissions, including those that are required to install {product-title}. If your organization's security policies require a more restrictive set of permissions, you can create a service account with the following permissions. If you deploy your cluster into an existing virtual private cloud (VPC), the service account does not require certain networking permissions, which are noted in the following lists: @@ -37,13 +37,13 @@ When you attach the `Owner` role to the service account that you create, you gra * Tag User ifdef::template[] -.Required roles for user-provisioned GCP infrastructure +.Required roles for user-provisioned {gcp-short} infrastructure * Deployment Manager Editor endif::template[] The following roles are applied to the service accounts that the control plane and compute machines use: -.GCP service account roles +.{gcp-full} service account roles [cols="2a,2a",options="header"] |=== |Account diff --git a/modules/installation-gcp-project.adoc b/modules/installation-gcp-project.adoc index aa6a84ececd5..53c2848a39ee 100644 --- a/modules/installation-gcp-project.adoc +++ b/modules/installation-gcp-project.adoc @@ -6,16 +6,16 @@ :_mod-docs-content-type: PROCEDURE [id="installation-gcp-project_{context}"] -= Creating a GCP project 
+= Creating a {gcp-short} project -To install {product-title}, you must create a project in your Google Cloud Platform (GCP) account to host the cluster. +To install {product-title}, you must create a project in your {gcp-first} account to host the cluster. .Procedure * Create a project to host your {product-title} cluster. See -link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[Creating and Managing Projects] in the GCP documentation. +link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[Creating and Managing Projects] in the {gcp-short} documentation. + [IMPORTANT] ==== -Your GCP project must use the Premium Network Service Tier if you are using installer-provisioned infrastructure. The Standard Network Service Tier is not supported for clusters installed using the installation program. The installation program configures internal load balancing for the `api-int..` URL; the Premium Tier is required for internal load balancing. +Your {gcp-short} project must use the Premium Network Service Tier if you are using installer-provisioned infrastructure. The Standard Network Service Tier is not supported for clusters installed using the installation program. The installation program configures internal load balancing for the `api-int..` URL; the Premium Tier is required for internal load balancing. 
==== diff --git a/modules/installation-gcp-regions.adoc b/modules/installation-gcp-regions.adoc index 24aba3478169..229c257bb9f0 100644 --- a/modules/installation-gcp-regions.adoc +++ b/modules/installation-gcp-regions.adoc @@ -6,9 +6,9 @@ :_mod-docs-content-type: REFERENCE [id="installation-gcp-regions_{context}"] -= Supported GCP regions += Supported {gcp-short} regions -You can deploy an {product-title} cluster to the following Google Cloud Platform (GCP) +You can deploy an {product-title} cluster to the following {gcp-first} regions: * `africa-south1` (Johannesburg, South Africa) diff --git a/modules/installation-gcp-service-account.adoc b/modules/installation-gcp-service-account.adoc index 9230e2a94c69..34bb1c12269d 100644 --- a/modules/installation-gcp-service-account.adoc +++ b/modules/installation-gcp-service-account.adoc @@ -6,9 +6,9 @@ :_mod-docs-content-type: PROCEDURE [id="installation-gcp-service-account_{context}"] -= Creating a service account in GCP += Creating a service account in {gcp-full} -{product-title} requires a Google Cloud Platform (GCP) service account that provides authentication and authorization to access data in the Google APIs. If you do not have an existing IAM service account that contains the required roles in your project, you must create one. +{product-title} requires a {gcp-first} service account that provides authentication and authorization to access data in the Google APIs. If you do not have an existing IAM service account that contains the required roles in your project, you must create one. .Prerequisites @@ -19,7 +19,7 @@ . Create a service account in the project that you use to host your {product-title} cluster. See link:https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account[Creating a service account] -in the GCP documentation. +in the {gcp-short} documentation. . Grant the service account the appropriate permissions. 
You can either grant the individual permissions that follow or assign the `Owner` role to it. @@ -30,8 +30,8 @@ See link:https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#gr While making the service account an owner of the project is the easiest way to gain the required permissions, it means that service account has complete control over the project. You must determine if the risk that comes from offering that power is acceptable. ==== -. You can create the service account key in JSON format, or attach the service account to a GCP virtual machine. -See link:https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys[Creating service account keys] and link:https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances[Creating and enabling service accounts for instances] in the GCP documentation. +. You can create the service account key in JSON format, or attach the service account to a {gcp-short} virtual machine. +See link:https://cloud.google.com/iam/docs/creating-managing-service-account-keys#creating_service_account_keys[Creating service account keys] and link:https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances[Creating and enabling service accounts for instances] in the {gcp-full} documentation. + [NOTE] ==== diff --git a/modules/installation-gcp-shared-vpc-config.adoc b/modules/installation-gcp-shared-vpc-config.adoc index a851db312f77..5d595a1fde45 100644 --- a/modules/installation-gcp-shared-vpc-config.adoc +++ b/modules/installation-gcp-shared-vpc-config.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="installation-gcp-shared-vpc-config_{context}"] = Sample customized install-config.yaml file for shared VPC installation -There are several configuration parameters which are required to install {product-title} on GCP using a shared VPC. The following is a sample `install-config.yaml` file which demonstrates these fields. 
+There are several configuration parameters which are required to install {product-title} on {gcp-short} using a shared VPC. The following is a sample `install-config.yaml` file which demonstrates these fields. [IMPORTANT] ==== @@ -60,11 +60,11 @@ networking: pullSecret: '{"auths": ...}' sshKey: ssh-ed25519 AAAA... <8> ---- -<1> `credentialsMode` must be set to `Passthrough` or `Manual`. See the "Prerequisites" section for the required GCP permissions that your service account must have. +<1> `credentialsMode` must be set to `Passthrough` or `Manual`. See the "Prerequisites" section for the required {gcp-short} permissions that your service account must have. <2> The name of the subnet in the shared VPC for compute machines to use. <3> The name of the subnet in the shared VPC for control plane machines to use. <4> The name of the shared VPC. <5> The name of the host project where the shared VPC exists. -<6> The name of the GCP project where you want to install the cluster. +<6> The name of the {gcp-short} project where you want to install the cluster. <7> Optional. One or more network tags to apply to compute machines, control plane machines, or all machines. <8> You can optionally provide the `sshKey` value that you use to access the machines in your cluster. \ No newline at end of file diff --git a/modules/installation-gcp-shared-vpc-ingress.adoc b/modules/installation-gcp-shared-vpc-ingress.adoc index 38aabec405af..0314a937ed92 100644 --- a/modules/installation-gcp-shared-vpc-ingress.adoc +++ b/modules/installation-gcp-shared-vpc-ingress.adoc @@ -7,7 +7,7 @@ If the public DNS zone exists in a host project outside the project where you installed your cluster, you must manually create DNS records that point at the Ingress load balancer. You can create either a wildcard `*.apps.{baseDomain}.` or specific records. You can use A, CNAME, and other records per your requirements. 
.Prerequisites -* You completed the installation of {product-title} on GCP into a shared VPC. +* You completed the installation of {product-title} on {gcp-short} into a shared VPC. * Your public DNS zone exists in a host project separate from the service project that contains your cluster. .Procedure @@ -30,7 +30,7 @@ router-default LoadBalancer 172.30.18.154 35.233.157.184 80:32288/TCP,44 ---- $ oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}' ---- -. Add a record to your GCP public zone with the router's external IP address and the name `*.apps..`. You can use the `gcloud` command-line utility or the GCP web console. +. Add a record to your {gcp-short} public zone with the router's external IP address and the name `*.apps..`. You can use the `gcloud` command-line utility or the {gcp-short} web console. . To add manual records instead of a wildcard record, create entries for each of the cluster's current routes. You can gather these routes by running the following command: + [source,terminal] diff --git a/modules/installation-gcp-tested-machine-types-arm.adoc b/modules/installation-gcp-tested-machine-types-arm.adoc index fb088e573bd3..b6ceac244c00 100644 --- a/modules/installation-gcp-tested-machine-types-arm.adoc +++ b/modules/installation-gcp-tested-machine-types-arm.adoc @@ -11,9 +11,9 @@ :_mod-docs-content-type: REFERENCE [id="installation-gcp-tested-machine-types-arm_{context}"] -= Tested instance types for GCP on 64-bit ARM infrastructures += Tested instance types for {gcp-short} on 64-bit ARM infrastructures -The following Google Cloud Platform (GCP) 64-bit ARM instance types have been tested with {product-title}. +The following {gcp-first} 64-bit ARM instance types have been tested with {product-title}. 
.Machine series for 64-bit ARM machines [%collapsible] diff --git a/modules/installation-gcp-tested-machine-types.adoc b/modules/installation-gcp-tested-machine-types.adoc index 3e9c2432bb47..773c51033e88 100644 --- a/modules/installation-gcp-tested-machine-types.adoc +++ b/modules/installation-gcp-tested-machine-types.adoc @@ -11,9 +11,9 @@ :_mod-docs-content-type: REFERENCE [id="installation-gcp-tested-machine-types_{context}"] -= Tested instance types for GCP += Tested instance types for {gcp-full} -The following Google Cloud Platform instance types have been tested with {product-title}. +The following {gcp-full} instance types have been tested with {product-title}. [NOTE] ==== diff --git a/modules/installation-gcp-user-infra-completing.adoc b/modules/installation-gcp-user-infra-completing.adoc index 6a3bb3d7c9b1..ef96c285790b 100644 --- a/modules/installation-gcp-user-infra-completing.adoc +++ b/modules/installation-gcp-user-infra-completing.adoc @@ -5,9 +5,9 @@ :_mod-docs-content-type: PROCEDURE [id="installation-gcp-user-infra-installation_{context}"] -= Completing a GCP installation on user-provisioned infrastructure += Completing a {gcp-short} installation on user-provisioned infrastructure -After you start the {product-title} installation on Google Cloud Platform (GCP) +After you start the {product-title} installation on {gcp-first} user-provisioned infrastructure, you can monitor the cluster events until the cluster is ready. 
diff --git a/modules/installation-gcp-user-infra-config-host-project-vpc.adoc b/modules/installation-gcp-user-infra-config-host-project-vpc.adoc index d6a55aa14024..74cbd9d06be2 100644 --- a/modules/installation-gcp-user-infra-config-host-project-vpc.adoc +++ b/modules/installation-gcp-user-infra-config-host-project-vpc.adoc @@ -4,9 +4,9 @@ :_mod-docs-content-type: PROCEDURE [id="installation-gcp-user-infra-config-host-project-vpc_{context}"] -= Configuring the GCP project that hosts your shared VPC network += Configuring the {gcp-short} project that hosts your shared VPC network -If you use a shared Virtual Private Cloud (VPC) to host your {product-title} cluster in Google Cloud Platform (GCP), you must configure the project that hosts it. +If you use a shared Virtual Private Cloud (VPC) to host your {product-title} cluster in {gcp-first}, you must configure the project that hosts it. [NOTE] ==== @@ -16,11 +16,11 @@ If you already have a project that hosts the shared VPC network, review this sec .Procedure . Create a project to host the shared VPC for your {product-title} cluster. See -link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[Creating and Managing Projects] in the GCP documentation. +link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[Creating and Managing Projects] in the {gcp-short} documentation. . Create a service account in the project that hosts your shared VPC. See link:https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account[Creating a service account] -in the GCP documentation. +in the {gcp-short} documentation. . Grant the service account the appropriate permissions. You can either grant the individual permissions that follow or assign the `Owner` role to it. 
diff --git a/modules/installation-gcp-user-infra-rhcos.adoc b/modules/installation-gcp-user-infra-rhcos.adoc index 3be7ef1f3524..bd64860a595e 100644 --- a/modules/installation-gcp-user-infra-rhcos.adoc +++ b/modules/installation-gcp-user-infra-rhcos.adoc @@ -4,9 +4,9 @@ :_mod-docs-content-type: PROCEDURE [id="installation-gcp-user-infra-rhcos_{context}"] -= Creating the {op-system} cluster image for the GCP infrastructure += Creating the {op-system} cluster image for the {gcp-short} infrastructure -You must use a valid {op-system-first} image for Google Cloud Platform (GCP) for +You must use a valid {op-system-first} image for {gcp-first} for your {product-title} nodes. .Procedure diff --git a/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc b/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc index 1d17a4b4fe32..ca18c6b9582f 100644 --- a/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc +++ b/modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc @@ -3,7 +3,7 @@ // * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc [id="installation-gcp-user-infra-shared-vpc-config-yaml_{context}"] -= Sample customized `install-config.yaml` file for GCP += Sample customized `install-config.yaml` file for {gcp-full} You can customize the `install-config.yaml` file to specify more details about your {product-title} cluster's platform or modify the values of the required parameters. 
diff --git a/modules/installation-gcp-user-infra-wait-for-bootstrap.adoc b/modules/installation-gcp-user-infra-wait-for-bootstrap.adoc index f34c7f770c46..ea8dcba8616d 100644 --- a/modules/installation-gcp-user-infra-wait-for-bootstrap.adoc +++ b/modules/installation-gcp-user-infra-wait-for-bootstrap.adoc @@ -5,13 +5,13 @@ :_mod-docs-content-type: PROCEDURE [id="installation-gcp-user-infra-wait-for-bootstrap_{context}"] -= Removing bootstrap resources in GCP += Removing bootstrap resources in {gcp-full} After you create all of the required infrastructure in {gcp-first}, wait for the bootstrap process to complete on the machines that you provisioned by using the Ignition config files. The installation program created the Ignition config files. .Prerequisites -* Ensure you defined the variables in the _Exporting common variables_ and _Creating load balancers in GCP_ sections. +* Ensure you defined the variables in the _Exporting common variables_ and _Creating load balancers in {gcp-full}_ sections. * Create the bootstrap machine. * Create the control plane machines. diff --git a/modules/installation-initializing.adoc b/modules/installation-initializing.adoc index 3cffcb04b421..16b450c712a0 100644 --- a/modules/installation-initializing.adoc +++ b/modules/installation-initializing.adoc @@ -168,7 +168,7 @@ ifdef::azure[] Microsoft Azure. endif::azure[] ifdef::gcp[] -Google Cloud Platform (GCP). +{gcp-first}. endif::gcp[] ifdef::ibm-cloud[] {ibm-cloud-name}. @@ -288,8 +288,8 @@ to the Azure DNS Zone that you created for your cluster. endif::azure[] ifdef::gcp[] ... Select *gcp* as the platform to target. -... If you have not configured the service account key for your GCP account on -your computer, you must obtain it from GCP and paste the contents of the file +... 
If you have not configured the service account key for your {gcp-short} account on +your computer, you must obtain it from {gcp-full} and paste the contents of the file or enter the absolute path to the file. ... Select the project ID to provision the cluster in. The default value is specified by the service account that you configured. diff --git a/modules/installation-launching-installer.adoc b/modules/installation-launching-installer.adoc index 1027eaae42ba..f0342389eb61 100644 --- a/modules/installation-launching-installer.adoc +++ b/modules/installation-launching-installer.adoc @@ -286,8 +286,8 @@ endif::vsphere[] .Procedure ifdef::gcp[] -. Remove any existing GCP credentials that do not use the service account key -for the GCP account that you configured for your cluster and that are stored in the +. Remove any existing {gcp-short} credentials that do not use the service account key +for the {gcp-full} account that you configured for your cluster and that are stored in the following locations: ** The `GOOGLE_CREDENTIALS`, `GOOGLE_CLOUD_KEYFILE_JSON`, or `GCLOUD_KEYFILE_JSON` environment variables @@ -393,8 +393,8 @@ If the installation program cannot locate the `osServicePrincipal.json` configur endif::azure[] ifdef::gcp[] .. Select *gcp* as the platform to target. -.. If you have not configured the service account key for your GCP account on -your host, you must obtain it from GCP and paste the contents of the file +.. If you have not configured the service account key for your {gcp-short} account on +your host, you must obtain it from {gcp-full} and paste the contents of the file or enter the absolute path to the file. .. Select the project ID to provision the cluster in. The default value is specified by the service account that you configured. 
diff --git a/modules/installation-special-config-rtkernel.adoc b/modules/installation-special-config-rtkernel.adoc index ec24e148e9c8..af3b8f55d72e 100644 --- a/modules/installation-special-config-rtkernel.adoc +++ b/modules/installation-special-config-rtkernel.adoc @@ -24,7 +24,7 @@ procedure. This procedure is fully supported with bare metal installations using systems that are certified for Red Hat Enterprise Linux for Real Time 8. Real time support in {product-title} is also limited to specific subscriptions. -This procedure is also supported for use with Google Cloud Platform. +This procedure is also supported for use with {gcp-full}. ==== .Prerequisites diff --git a/modules/installation-three-node-cluster-cloud-provider.adoc b/modules/installation-three-node-cluster-cloud-provider.adoc index 76a11841e2b7..f7440473b18a 100644 --- a/modules/installation-three-node-cluster-cloud-provider.adoc +++ b/modules/installation-three-node-cluster-cloud-provider.adoc @@ -69,7 +69,7 @@ ifdef::azure[] For more information, see "Creating the Kubernetes manifest and Ignition config files" in "Installing a cluster on Azure using ARM templates". endif::azure[] ifdef::gcp[] -For more information, see "Creating the Kubernetes manifest and Ignition config files" in "Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates". +For more information, see "Creating the Kubernetes manifest and Ignition config files" in "Installing a cluster on user-provisioned infrastructure in {gcp-short} by using Deployment Manager templates". endif::gcp[] ** Do not create additional worker nodes. 
endif::vsphere,nutanix,openstack[] diff --git a/modules/installation-user-infra-exporting-common-variables.adoc b/modules/installation-user-infra-exporting-common-variables.adoc index 46cb81d9e3b3..36e81152f9db 100644 --- a/modules/installation-user-infra-exporting-common-variables.adoc +++ b/modules/installation-user-infra-exporting-common-variables.adoc @@ -10,25 +10,25 @@ ifeval::["{context}" == "installing-gcp-user-infra-vpc"] endif::[] ifeval::["{context}" == "installing-gcp-user-infra"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :cp-template: Deployment Manager endif::[] ifeval::["{context}" == "installing-restricted-networks-gcp"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :cp-template: Deployment Manager endif::[] ifeval::["{context}" == "installing-restricted-networks-gcp-vpc"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :cp-template: Deployment Manager endif::[] ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :cp-template: Deployment Manager :shared-vpc: @@ -40,7 +40,7 @@ endif::[] You must export a common set of variables that are used with the provided {cp-template} templates used to assist in completing a user-provided -infrastructure install on {cp-first} ({cp}). +infrastructure install on {cp-first}. 
[NOTE] ==== @@ -106,14 +106,14 @@ ifeval::["{context}" == "installing-restricted-networks-gcp"] endif::[] ifeval::["{context}" == "installing-restricted-networks-gcp-vpc"] -:!cp-first: Google Cloud Platform +:!cp-first: Google Cloud :!cp: GCP :!cp-template: Deployment Manager endif::[] ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!cp-first: Google Cloud Platform +:!cp-first: Google Cloud :!cp: GCP :!cp-template: Deployment Manager :!shared-vpc: -endif::[] +endif::[] diff --git a/modules/installation-user-infra-generate.adoc b/modules/installation-user-infra-generate.adoc index 8e5412c427a0..05e55411f29d 100644 --- a/modules/installation-user-infra-generate.adoc +++ b/modules/installation-user-infra-generate.adoc @@ -43,22 +43,22 @@ ifeval::["{context}" == "installing-azure-stack-hub-user-infra"] :ash: endif::[] ifeval::["{context}" == "installing-gcp-user-infra"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :gcp: endif::[] ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :gcp: endif::[] ifeval::["{context}" == "installing-gcp-shared-vpc"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :gcp-shared: endif::[] ifeval::["{context}" == "installing-restricted-networks-gcp"] -:cp-first: Google Cloud Platform +:cp-first: Google Cloud :cp: GCP :gcp: endif::[] @@ -129,12 +129,12 @@ ifeval::["{context}" == "installing-gcp-user-infra"] :!gcp: endif::[] ifeval::["{context}" == "installing-gcp-user-infra-vpc"] -:!cp-first: Google Cloud Platform +:!cp-first: Google Cloud :!cp: GCP :!gcp: endif::[] ifeval::["{context}" == "installing-gcp-shared-vpc"] -:!cp-first: Google Cloud Platform +:!cp-first: Google Cloud :!cp: GCP :!gcp-shared: endif::[] diff --git a/modules/installing-gcp-cluster-creation.adoc b/modules/installing-gcp-cluster-creation.adoc index 6705fe9a7cb3..1aeae4ecbadb 100644 --- 
a/modules/installing-gcp-cluster-creation.adoc +++ b/modules/installing-gcp-cluster-creation.adoc @@ -3,7 +3,7 @@ :_mod-docs-content-type: PROCEDURE [id="installing-gcp-cluster-creation_{context}"] -= Configuring user-defined labels and tags for GCP += Configuring user-defined labels and tags for {gcp-full} Configuring user-defined labels and tags for {gcp-full} means that you can apply key-value pairs to your cloud resources for the purposes of organizing, managing, and automating your infrastructure. diff --git a/modules/installing-gcp-querying-labels-tags-gcp.adoc b/modules/installing-gcp-querying-labels-tags-gcp.adoc index db451e61618c..e86824bca5f0 100644 --- a/modules/installing-gcp-querying-labels-tags-gcp.adoc +++ b/modules/installing-gcp-querying-labels-tags-gcp.adoc @@ -3,9 +3,9 @@ :_mod-docs-content-type: REFERENCE [id="installing-gcp-querying-labels-tags-gcp_{context}"] -= Querying user-defined labels and tags for GCP += Querying user-defined labels and tags for {gcp-full} -After creating the {product-title} cluster, you can access the list of the labels and tags defined for the GCP resources in the `infrastructures.config.openshift.io/cluster` object as shown in the following sample `infrastructure.yaml` file. +After creating the {product-title} cluster, you can access the list of the labels and tags defined for the {gcp-short} resources in the `infrastructures.config.openshift.io/cluster` object as shown in the following sample `infrastructure.yaml` file. 
.Sample `infrastructure.yaml` file [source,yaml] diff --git a/modules/installing-gcp-user-defined-labels-and-tags.adoc b/modules/installing-gcp-user-defined-labels-and-tags.adoc index c3c91beac674..47d46e471ea1 100644 --- a/modules/installing-gcp-user-defined-labels-and-tags.adoc +++ b/modules/installing-gcp-user-defined-labels-and-tags.adoc @@ -3,11 +3,11 @@ :_mod-docs-content-type: CONCEPT [id="installing-gcp-user-defined-labels-and-tags_{context}"] -= Managing user-defined labels and tags for GCP += Managing user-defined labels and tags for {gcp-full} -Google Cloud Platform (GCP) provides labels and tags that help to identify and organize the resources created for a specific {product-title} cluster, making them easier to manage. +{gcp-first} provides labels and tags that help to identify and organize the resources created for a specific {product-title} cluster, making them easier to manage. -You can define labels and tags for each GCP resource only during {product-title} cluster installation. +You can define labels and tags for each {gcp-short} resource only during {product-title} cluster installation. [IMPORTANT] ==== @@ -23,40 +23,40 @@ You cannot update the tags that are already added. Also, a new tag-supported res User-defined labels and {product-title} specific labels are applied only to resources created by {product-title} installation program and its core components such as: -* GCP filestore CSI Driver Operator -* GCP PD CSI Driver Operator +* {gcp-short} filestore CSI Driver Operator +* {gcp-short} PD CSI Driver Operator * Image Registry Operator -* Machine API provider for GCP +* Machine API provider for {gcp-full} User-defined labels are not attached to the resources created by any other Operators or the Kubernetes in-tree components. 
-User-defined labels and {product-title} labels are available on the following GCP resources: +User-defined labels and {product-title} labels are available on the following {gcp-short} resources: * Compute disk * Compute forwarding rule * Compute image * Compute instance * DNS managed zone -* Filestore backup +* Filestore backup * Filestore instance * Storage bucket .Limitations to user-defined labels -* Labels for `ComputeAddress` are supported in the GCP beta version. {product-title} does not add labels to the resource. +* Labels for `ComputeAddress` are supported in the {gcp-short} beta version. {product-title} does not add labels to the resource. .User-defined tags User-defined tags are applied only to resources created by {product-title} installation program and its core components, such as the following resources: -* GCP FileStore CSI Driver Operator -* GCP PD CSI Driver Operator +* {gcp-short} FileStore CSI Driver Operator +* {gcp-short} PD CSI Driver Operator * Image Registry Operator -* Machine API provider for GCP +* Machine API provider for {gcp-full} User-defined tags are not attached to the resources created by any other Operators or the Kubernetes in-tree components. -User-defined tags are available on the following GCP resources: +User-defined tags are available on the following {gcp-short} resources: * Compute disk * Compute instance diff --git a/modules/logging-loki-storage-gcp.adoc b/modules/logging-loki-storage-gcp.adoc index 28737f800478..e3f41215905a 100644 --- a/modules/logging-loki-storage-gcp.adoc +++ b/modules/logging-loki-storage-gcp.adoc @@ -4,19 +4,19 @@ :_mod-docs-content-type: PROCEDURE [id="logging-loki-storage-gcp_{context}"] -= Google Cloud Platform storage += {gcp-full} storage .Prerequisites * You installed the {loki-op}. * You installed the {oc-first}. -* You created a link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[project] on Google Cloud Platform (GCP). 
+* You created a link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[project] on {gcp-first}. * You created a link:https://cloud.google.com/storage/docs/creating-buckets[bucket] in the same project. -* You created a link:https://cloud.google.com/docs/authentication/getting-started#creating_a_service_account[service account] in the same project for GCP authentication. +* You created a link:https://cloud.google.com/docs/authentication/getting-started#creating_a_service_account[service account] in the same project for {gcp-short} authentication. .Procedure -. Copy the service account credentials received from GCP into a file called `key.json`. +. Copy the service account credentials received from {gcp-short} into a file called `key.json`. . Create an object storage secret with the name `logging-loki-gcs` by running the following command: + diff --git a/modules/logging-release-notes-5-9-0.adoc b/modules/logging-release-notes-5-9-0.adoc index 5e2a8b4d491b..796c218eb77a 100644 --- a/modules/logging-release-notes-5-9-0.adoc +++ b/modules/logging-release-notes-5-9-0.adoc @@ -47,7 +47,7 @@ The {logging-uc} 5.9 release does not contain an updated version of the {es-op}. * With this update, the validation of the Azure storage secret is now extended to give early warning for certain error conditions. (link:https://issues.redhat.com/browse/LOG-4571[LOG-4571]) -* With this update, Loki now adds upstream and downstream support for GCP workload identity federation mechanism. This allows authenticated and authorized access to the corresponding object storage services. (link:https://issues.redhat.com/browse/LOG-4754[LOG-4754]) +* With this update, Loki now adds upstream and downstream support for {gcp-short} workload identity federation mechanism. This allows authenticated and authorized access to the corresponding object storage services. 
(link:https://issues.redhat.com/browse/LOG-4754[LOG-4754]) [id="logging-release-notes-5-9-0-bug-fixes"] == Bug Fixes diff --git a/modules/logging-rn-5.7.4.adoc b/modules/logging-rn-5.7.4.adoc index 4ec53012e9c5..0e0a66e578c6 100644 --- a/modules/logging-rn-5.7.4.adoc +++ b/modules/logging-rn-5.7.4.adoc @@ -14,7 +14,7 @@ This release includes link:https://access.redhat.com/errata/RHSA-2023:4341[OpenS * Before this update, the Operator would fail if the Fluentd collector was configured with Splunk as an output, due to this configuration being unsupported. With this update, configuration validation rejects unsupported outputs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4237[LOG-4237]) -* Before this update, when the Vector collector was updated an `enabled = true` value in the TLS configuration for AWS Cloudwatch logs and the GCP Stackdriver caused a configuration error. With this update, `enabled = true` value will be removed for these outputs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4242[LOG-4242]) +* Before this update, when the Vector collector was updated an `enabled = true` value in the TLS configuration for AWS Cloudwatch logs and the {gcp-short} Stackdriver caused a configuration error. With this update, `enabled = true` value will be removed for these outputs, resolving the issue. (link:https://issues.redhat.com/browse/LOG-4242[LOG-4242]) * Before this update, the Vector collector occasionally panicked with the following error message in its log: `thread 'vector-worker' panicked at 'all branches are disabled and there is no else branch', src/kubernetes/reflector.rs:26:9`. With this update, the error has been resolved. 
(link:https://issues.redhat.com/browse/LOG-4275[LOG-4275]) diff --git a/modules/machine-autoscaler-cr.adoc b/modules/machine-autoscaler-cr.adoc index e33afbd0dd32..85d420066219 100644 --- a/modules/machine-autoscaler-cr.adoc +++ b/modules/machine-autoscaler-cr.adoc @@ -26,7 +26,7 @@ spec: name: worker-us-east-1a <6> ---- <1> Specify the machine autoscaler name. To make it easier to identify which compute machine set this machine autoscaler scales, specify or include the name of the compute machine set to scale. The compute machine set name takes the following form: `--`. -<2> Specify the minimum number machines of the specified type that must remain in the specified zone after the cluster autoscaler initiates cluster scaling. If running in AWS, GCP, Azure, {rh-openstack}, or vSphere, this value can be set to `0`. For other providers, do not set this value to `0`. +<2> Specify the minimum number machines of the specified type that must remain in the specified zone after the cluster autoscaler initiates cluster scaling. If running in AWS, {gcp-short}, Azure, {rh-openstack}, or vSphere, this value can be set to `0`. For other providers, do not set this value to `0`. + You can save on costs by setting this value to `0` for use cases such as running expensive or limited-usage hardware that is used for specialized workloads, or by scaling a compute machine set with extra large machines. The cluster autoscaler scales the compute machine set down to zero if the machines are not in use. 
+ diff --git a/modules/machine-feature-agnostic-nonguaranteed-instances.adoc b/modules/machine-feature-agnostic-nonguaranteed-instances.adoc index c56e4710cced..e98dd4a39e20 100644 --- a/modules/machine-feature-agnostic-nonguaranteed-instances.adoc +++ b/modules/machine-feature-agnostic-nonguaranteed-instances.adoc @@ -1,7 +1,7 @@ // Module included in the following assemblies: // // * machine_management/cluster_api_machine_management/cluster_api_provider_configurations/cluster-api-config-options-aws.adoc -// There are parallel features in Azure and GCP so this module is set up for reuse. +// There are parallel features in Azure and {gcp-short} so this module is set up for reuse. ifeval::["{context}" == "cluster-api-config-options-aws"] :aws: diff --git a/modules/machineset-creating-non-guaranteed-instances.adoc b/modules/machineset-creating-non-guaranteed-instances.adoc index db6d0c70fe6b..a732ab123d9a 100644 --- a/modules/machineset-creating-non-guaranteed-instances.adoc +++ b/modules/machineset-creating-non-guaranteed-instances.adoc @@ -22,7 +22,7 @@ ifdef::gcp[= Creating preemptible VM instances by using compute machine sets] ifdef::aws[You can launch a Spot Instance on AWS by adding `spotMarketOptions` to your compute machine set YAML file.] ifdef::azure[You can launch a Spot VM on Azure by adding `spotVMOptions` to your compute machine set YAML file.] -ifdef::gcp[You can launch a preemptible VM instance on GCP by adding `preemptible` to your compute machine set YAML file.] +ifdef::gcp[You can launch a preemptible VM instance on {gcp-short} by adding `preemptible` to your compute machine set YAML file.] 
.Procedure * Add the following line under the `providerSpec` field: diff --git a/modules/machineset-gcp-confidential-vm.adoc b/modules/machineset-gcp-confidential-vm.adoc index 0de09f540070..5f48078f1e86 100644 --- a/modules/machineset-gcp-confidential-vm.adoc +++ b/modules/machineset-gcp-confidential-vm.adoc @@ -13,12 +13,12 @@ endif::[] By editing the machine set YAML file, you can configure the Confidential VM options that a machine set uses for machines that it deploys. -For more information about Confidential VM features, functions, and compatibility, see the GCP Compute Engine documentation about link:https://cloud.google.com/confidential-computing/confidential-vm/docs/about-cvm#confidential-vm[Confidential VM]. +For more information about Confidential VM features, functions, and compatibility, see the {gcp-short} Compute Engine documentation about link:https://cloud.google.com/confidential-computing/confidential-vm/docs/about-cvm#confidential-vm[Confidential VM]. [NOTE] ==== Confidential VMs are currently not supported on 64-bit ARM architectures. -If you use Confidential VM, you must ensure that you select a supported region. For details on supported regions and configurations, see the GCP Compute Engine documentation about link:https://cloud.google.com/confidential-computing/confidential-vm/docs/supported-configurations#supported-zones[supported zones]. +If you use Confidential VM, you must ensure that you select a supported region. For details on supported regions and configurations, see the {gcp-short} Compute Engine documentation about link:https://cloud.google.com/confidential-computing/confidential-vm/docs/supported-configurations#supported-zones[supported zones]. 
==== .Procedure diff --git a/modules/machineset-gcp-enabling-customer-managed-encryption.adoc b/modules/machineset-gcp-enabling-customer-managed-encryption.adoc index 6b1a7262ca3f..2df320b7c5aa 100644 --- a/modules/machineset-gcp-enabling-customer-managed-encryption.adoc +++ b/modules/machineset-gcp-enabling-customer-managed-encryption.adoc @@ -11,7 +11,7 @@ endif::[] [id="machineset-gcp-enabling-customer-managed-encryption_{context}"] = Enabling customer-managed encryption keys for a machine set -Google Cloud Platform (GCP) Compute Engine allows users to supply an encryption key to encrypt data on disks at rest. The key is used to encrypt the data encryption key, not to encrypt the customer's data. By default, Compute Engine encrypts this data by using Compute Engine keys. +{gcp-first} Compute Engine allows users to supply an encryption key to encrypt data on disks at rest. The key is used to encrypt the data encryption key, not to encrypt the customer's data. By default, Compute Engine encrypts this data by using Compute Engine keys. You can enable encryption with a customer-managed key in clusters that use the Machine API. You must first link:https://cloud.google.com/compute/docs/disks/customer-managed-encryption#before_you_begin[create a KMS key] and assign the correct permissions to a service account. The KMS key name, key ring name, and location are required to allow a service account to use your key. @@ -63,7 +63,7 @@ spec: ---- <1> The name of the customer-managed encryption key that is used for the disk encryption. <2> The name of the KMS key ring that the KMS key belongs to. -<3> The GCP location in which the KMS key ring exists. +<3> The {gcp-short} location in which the KMS key ring exists. <4> Optional: The ID of the project in which the KMS key ring exists. If a project ID is not set, the machine set `projectID` in which the machine set was created is used. 
<5> Optional: The service account that is used for the encryption request for the given KMS key. If a service account is not set, the Compute Engine default service account is used. + diff --git a/modules/machineset-gcp-enabling-gpu-support.adoc b/modules/machineset-gcp-enabling-gpu-support.adoc index 030779d91e2e..19fee24496af 100644 --- a/modules/machineset-gcp-enabling-gpu-support.adoc +++ b/modules/machineset-gcp-enabling-gpu-support.adoc @@ -6,7 +6,7 @@ [id="machineset-gcp-enabling-gpu-support_{context}"] = Enabling GPU support for a compute machine set -Google Cloud Platform (GCP) Compute Engine enables users to add GPUs to VM instances. Workloads that benefit from access to GPU resources can perform better on compute machines with this feature enabled. {product-title} on GCP supports NVIDIA GPU models in the A2 and N1 machine series. +{gcp-first} Compute Engine enables users to add GPUs to VM instances. Workloads that benefit from access to GPU resources can perform better on compute machines with this feature enabled. {product-title} on {gcp-first} supports NVIDIA GPU models in the A2 and N1 machine series. .Supported GPU configurations |==== @@ -60,7 +60,7 @@ a|* `a2-highgpu-1g` |==== [.small] -- -1. For more information about machine types, including specifications, compatibility, regional availability, and limitations, see the GCP Compute Engine documentation about link:https://cloud.google.com/compute/docs/general-purpose-machines#n1_machines[N1 machine series], link:https://cloud.google.com/compute/docs/accelerator-optimized-machines#a2_vms[A2 machine series], and link:https://cloud.google.com/compute/docs/gpus/gpu-regions-zones#gpu_regions_and_zones[GPU regions and zones availability]. +1. 
For more information about machine types, including specifications, compatibility, regional availability, and limitations, see the {gcp-short} Compute Engine documentation about link:https://cloud.google.com/compute/docs/general-purpose-machines#n1_machines[N1 machine series], link:https://cloud.google.com/compute/docs/accelerator-optimized-machines#a2_vms[A2 machine series], and link:https://cloud.google.com/compute/docs/gpus/gpu-regions-zones#gpu_regions_and_zones[GPU regions and zones availability]. -- You can define which supported GPU to use for an instance by using the Machine API. diff --git a/modules/machineset-gcp-pd-disk-types.adoc b/modules/machineset-gcp-pd-disk-types.adoc index 59acd61f5865..ae1493c6538e 100644 --- a/modules/machineset-gcp-pd-disk-types.adoc +++ b/modules/machineset-gcp-pd-disk-types.adoc @@ -13,7 +13,7 @@ endif::[] You can configure the type of persistent disk that a machine set deploys machines on by editing the machine set YAML file. -For more information about persistent disk types, compatibility, regional availability, and limitations, see the GCP Compute Engine documentation about link:https://cloud.google.com/compute/docs/disks#pdspecs[persistent disks]. +For more information about persistent disk types, compatibility, regional availability, and limitations, see the {gcp-short} Compute Engine documentation about link:https://cloud.google.com/compute/docs/disks#pdspecs[persistent disks]. .Procedure diff --git a/modules/machineset-gcp-shielded-vms.adoc b/modules/machineset-gcp-shielded-vms.adoc index d313c6fe157a..47009040ec21 100644 --- a/modules/machineset-gcp-shielded-vms.adoc +++ b/modules/machineset-gcp-shielded-vms.adoc @@ -13,7 +13,7 @@ endif::[] By editing the machine set YAML file, you can configure the Shielded VM options that a machine set uses for machines that it deploys. 
-For more information about Shielded VM features and functionality, see the GCP Compute Engine documentation about link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm[Shielded VM]. +For more information about Shielded VM features and functionality, see the {gcp-short} Compute Engine documentation about link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm[Shielded VM]. .Procedure diff --git a/modules/machineset-non-guaranteed-instance.adoc b/modules/machineset-non-guaranteed-instance.adoc index 6ecb68390bb0..a9b39b0dca0b 100644 --- a/modules/machineset-non-guaranteed-instance.adoc +++ b/modules/machineset-non-guaranteed-instance.adoc @@ -25,7 +25,7 @@ ifdef::azure[] You can save on costs by creating a compute machine set running on Azure that deploys machines as non-guaranteed Spot VMs. Spot VMs utilize unused Azure capacity and are less expensive than standard VMs. You can use Spot VMs for workloads that can tolerate interruptions, such as batch or stateless, horizontally scalable workloads. endif::azure[] ifdef::gcp[] -You can save on costs by creating a compute machine set running on GCP that deploys machines as non-guaranteed preemptible VM instances. Preemptible VM instances utilize excess Compute Engine capacity and are less expensive than normal instances. You can use preemptible VM instances for workloads that can tolerate interruptions, such as batch or stateless, horizontally scalable workloads. +You can save on costs by creating a compute machine set running on {gcp-short} that deploys machines as non-guaranteed preemptible VM instances. Preemptible VM instances utilize excess Compute Engine capacity and are less expensive than normal instances. You can use preemptible VM instances for workloads that can tolerate interruptions, such as batch or stateless, horizontally scalable workloads. 
endif::gcp[] ifdef::aws[] @@ -52,7 +52,7 @@ Interruptions can occur when using Spot VMs for the following reasons: When Azure terminates an instance, a termination handler running on the Spot VM node deletes the machine resource. To satisfy the compute machine set `replicas` quantity, the compute machine set creates a machine that requests a Spot VM. endif::azure[] ifdef::gcp[] -GCP Compute Engine can terminate a preemptible VM instance at any time. Compute Engine sends a preemption notice to the user indicating that an interruption will occur in 30 seconds. {product-title} begins to remove the workloads from the affected instances when Compute Engine issues the preemption notice. An ACPI G3 Mechanical Off signal is sent to the operating system after 30 seconds if the instance is not stopped. The preemptible VM instance is then transitioned to a `TERMINATED` state by Compute Engine. +{gcp-full} Compute Engine can terminate a preemptible VM instance at any time. Compute Engine sends a preemption notice to the user indicating that an interruption will occur in 30 seconds. {product-title} begins to remove the workloads from the affected instances when Compute Engine issues the preemption notice. An ACPI G3 Mechanical Off signal is sent to the operating system after 30 seconds if the instance is not stopped. The preemptible VM instance is then transitioned to a `TERMINATED` state by Compute Engine. Interruptions can occur when using preemptible VM instances for the following reasons: @@ -60,7 +60,7 @@ Interruptions can occur when using preemptible VM instances for the following re * The supply of preemptible VM instances decreases * The instance reaches the end of the allotted 24-hour period for preemptible VM instances -When GCP terminates an instance, a termination handler running on the preemptible VM instance node deletes the machine resource. 
To satisfy the compute machine set `replicas` quantity, the compute machine set creates a machine that requests a preemptible VM instance. +When {gcp-short} terminates an instance, a termination handler running on the preemptible VM instance node deletes the machine resource. To satisfy the compute machine set `replicas` quantity, the compute machine set creates a machine that requests a preemptible VM instance. endif::gcp[] ifeval::["{context}" == "creating-machineset-aws"] diff --git a/modules/machineset-upi-reqs-ignition-config.adoc b/modules/machineset-upi-reqs-ignition-config.adoc index a5541ed18e25..1d31e8a0dfb7 100644 --- a/modules/machineset-upi-reqs-ignition-config.adoc +++ b/modules/machineset-upi-reqs-ignition-config.adoc @@ -2,7 +2,7 @@ // // * machine_management/creating_machinesets/creating-machineset-vsphere.adoc // -// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and GCP. +// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and {gcp-short}. ifeval::["{context}" == "creating-machineset-vsphere"] :vsphere: diff --git a/modules/machineset-upi-reqs-infra-id.adoc b/modules/machineset-upi-reqs-infra-id.adoc index 7e7c89cbb713..66701e97c5b5 100644 --- a/modules/machineset-upi-reqs-infra-id.adoc +++ b/modules/machineset-upi-reqs-infra-id.adoc @@ -2,7 +2,7 @@ // // * machine_management/creating_machinesets/creating-machineset-vsphere.adoc // -// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and GCP. +// Currently only in the vSphere compute machine set content, but we will want this for other platforms such as AWS and {gcp-short}. 
ifeval::["{context}" == "creating-machineset-vsphere"] :vsphere: diff --git a/modules/machineset-yaml-gcp.adoc b/modules/machineset-yaml-gcp.adoc index 1d6a3260a961..b2530580c260 100644 --- a/modules/machineset-yaml-gcp.adoc +++ b/modules/machineset-yaml-gcp.adoc @@ -9,9 +9,9 @@ endif::[] :_mod-docs-content-type: REFERENCE [id="machineset-yaml-gcp_{context}"] -= Sample YAML for a compute machine set custom resource on GCP += Sample YAML for a compute machine set custom resource on {gcp-full} -This sample YAML defines a compute machine set that runs in Google Cloud Platform (GCP) and creates nodes that are labeled with +This sample YAML defines a compute machine set that runs in {gcp-first} and creates nodes that are labeled with ifndef::infra[`node-role.kubernetes.io/<role>: ""`,] ifdef::infra[`node-role.kubernetes.io/infra: ""`,] where @@ -41,7 +41,7 @@ $ oc -n openshift-machine-api \ get machineset/<infrastructure_id>-worker-a ---- -.Sample GCP `MachineSet` values +.Sample {gcp-short} `MachineSet` values [source,yaml] ---- apiVersion: machine.openshift.io/v1beta1 @@ -130,15 +130,15 @@ ifdef::infra[] endif::infra[] <3> Specify the path to the image that is used in current compute machine sets. + -To use a GCP Marketplace image, specify the offer to use: +To use a {gcp-short} Marketplace image, specify the offer to use: + -- * {product-title}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-413-x86-64-202305021736` * {opp}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-opp-413-x86-64-202305021736` * {oke}: `\https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-oke-413-x86-64-202305021736` -- -<4> Optional: Specify custom metadata in the form of a `key:value` pair. For example use cases, see the GCP documentation for link:https://cloud.google.com/compute/docs/metadata/setting-custom-metadata[setting custom metadata].
-<5> For `<project_name>`, specify the name of the GCP project that you use for your cluster. +<4> Optional: Specify custom metadata in the form of a `key:value` pair. For example use cases, see the {gcp-short} documentation for link:https://cloud.google.com/compute/docs/metadata/setting-custom-metadata[setting custom metadata]. +<5> For `<project_name>`, specify the name of the {gcp-short} project that you use for your cluster. <6> Specifies a single service account. Multiple service accounts are not supported. ifdef::infra[] <7> Specify a taint to prevent user workloads from being scheduled on infra nodes. diff --git a/modules/manually-create-identity-access-management.adoc b/modules/manually-create-identity-access-management.adoc index cbbcfb64e953..f90de4d428df 100644 --- a/modules/manually-create-identity-access-management.adoc +++ b/modules/manually-create-identity-access-management.adoc @@ -160,9 +160,9 @@ endif::cco-manual-mode[] .Procedure ifdef::google-cloud-platform[] -. Add the following granular permissions to the GCP account that the installation program uses: +. Add the following granular permissions to the {gcp-short} account that the installation program uses: + -.Required GCP permissions +.Required {gcp-short} permissions [%collapsible] ==== * compute.machineTypes.list diff --git a/modules/manually-maintained-credentials-upgrade.adoc b/modules/manually-maintained-credentials-upgrade.adoc index 50678b6e00d6..b987e142c567 100644 --- a/modules/manually-maintained-credentials-upgrade.adoc +++ b/modules/manually-maintained-credentials-upgrade.adoc @@ -108,10 +108,10 @@ data: ---- ==== + -.Sample GCP YAML files +.Sample {gcp-short} YAML files [%collapsible] ==== -.Sample GCP `CredentialsRequest` object with secrets +.Sample {gcp-short} `CredentialsRequest` object with secrets [source,yaml] ---- apiVersion: cloudcredential.openshift.io/v1 @@ -135,7 +135,7 @@ spec: ...
---- -.Sample GCP `Secret` object +.Sample {gcp-short} `Secret` object [source,yaml] ---- apiVersion: v1 diff --git a/modules/manually-removing-cloud-creds.adoc b/modules/manually-removing-cloud-creds.adoc index 3e0c2c359058..33d2c7eb1b86 100644 --- a/modules/manually-removing-cloud-creds.adoc +++ b/modules/manually-removing-cloud-creds.adoc @@ -21,7 +21,7 @@ If the credential is not present, the update might be blocked. .Prerequisites * Your cluster is installed on a platform that supports removing cloud credentials from the CCO. -Supported platforms are AWS and GCP. +Supported platforms are AWS and {gcp-short}. .Procedure @@ -37,7 +37,7 @@ Supported platforms are AWS and GCP. |AWS |`aws-creds` -|GCP +|{gcp-full} |`gcp-credentials` |=== diff --git a/modules/manually-rotating-cloud-creds.adoc b/modules/manually-rotating-cloud-creds.adoc index 4dcd86169d63..d9e599b877fc 100644 --- a/modules/manually-rotating-cloud-creds.adoc +++ b/modules/manually-rotating-cloud-creds.adoc @@ -35,11 +35,11 @@ You can also use the command-line interface to complete all parts of this proced * Your cluster is installed on a platform that supports rotating cloud credentials manually with the CCO mode that you are using: ifndef::passthrough[] -** For mint mode, Amazon Web Services (AWS) and Google Cloud Platform (GCP) are supported. +** For mint mode, Amazon Web Services (AWS) and {gcp-first} are supported. endif::passthrough[] ifndef::mint[] -** For passthrough mode, Amazon Web Services (AWS), Microsoft Azure, Google Cloud Platform (GCP), {rh-openstack-first}, and VMware vSphere are supported. +** For passthrough mode, Amazon Web Services (AWS), Microsoft Azure, {gcp-first}, {rh-openstack-first}, and VMware vSphere are supported. endif::mint[] * You have changed the credentials that are used to interface with your cloud provider. 
@@ -65,7 +65,7 @@ ifndef::mint[] |`azure-credentials` endif::mint[] -|GCP +|{gcp-full} |`gcp-credentials` ifndef::mint[] @@ -188,7 +188,7 @@ To verify that the credentials have changed: $ oc -n openshift-cloud-credential-operator get CredentialsRequest -o json | jq -r '.items[] | select (.spec[].kind=="<provider_spec>") | .metadata.name' ---- + -Where `<provider_spec>` is the corresponding value for your cloud provider: `AWSProviderSpec` for AWS, `AzureProviderSpec` for Azure, or `GCPProviderSpec` for GCP. +Where `<provider_spec>` is the corresponding value for your cloud provider: `AWSProviderSpec` for AWS, `AzureProviderSpec` for Azure, or `GCPProviderSpec` for {gcp-short}. + .Example output for AWS + diff --git a/modules/migrating-to-multi-arch-cli.adoc b/modules/migrating-to-multi-arch-cli.adoc index f9d33b4e46c4..7c0c1b6aad38 100644 --- a/modules/migrating-to-multi-arch-cli.adoc +++ b/modules/migrating-to-multi-arch-cli.adoc @@ -14,7 +14,7 @@ For more information on how to update your cluster version, see _Updating a cluster using the web console_ or _Updating a cluster using the CLI_. * You have installed the OpenShift CLI (`oc`) that matches the version for your current cluster. * Your `oc` client is updated to at least version 4.13.0. -* Your {product-title} cluster is installed on AWS, Azure, GCP, bare metal or IBM P/Z platforms. +* Your {product-title} cluster is installed on AWS, Azure, {gcp-short}, bare metal or IBM P/Z platforms. + For more information on selecting a supported platform for your cluster installation, see _Selecting a cluster installation type_.
diff --git a/modules/migration-adding-replication-repository-to-cam.adoc b/modules/migration-adding-replication-repository-to-cam.adoc index 27823fe123be..c274a810805d 100644 --- a/modules/migration-adding-replication-repository-to-cam.adoc +++ b/modules/migration-adding-replication-repository-to-cam.adoc @@ -14,7 +14,7 @@ You can add an object storage as a replication repository to the {mtc-first} web * Amazon Web Services (AWS) S3 * Multi-Cloud Object Gateway (MCG) * Generic S3 object storage, for example, Minio or Ceph S3 -* Google Cloud Provider (GCP) +* {gcp-first} * Microsoft Azure Blob .Prerequisites @@ -38,11 +38,11 @@ You can add an object storage as a replication repository to the {mtc-first} web ** *Require SSL verification*: Clear this checkbox if you are using a generic S3 provider. ** If you created a custom CA certificate bundle for self-signed certificates, click *Browse* and browse to the Base64-encoded file. -* *GCP*: +* *{gcp-full}*: ** *Replication repository name*: Specify the replication repository name in the {mtc-short} web console. -** *GCP bucket name*: Specify the name of the GCP bucket. -** *GCP credential JSON blob*: Specify the string in the `credentials-velero` file. +** *{gcp-full} bucket name*: Specify the name of the {gcp-full} bucket. +** *{gcp-full} credential JSON blob*: Specify the string in the `credentials-velero` file. * *Azure*: diff --git a/modules/migration-configuring-gcp.adoc b/modules/migration-configuring-gcp.adoc index 923715d9b191..4bcc11f93b62 100644 --- a/modules/migration-configuring-gcp.adoc +++ b/modules/migration-configuring-gcp.adoc @@ -6,14 +6,14 @@ :_mod-docs-content-type: PROCEDURE [id="migration-configuring-gcp_{context}"] -= Configuring Google Cloud Platform += Configuring {gcp-full} [role="_abstract"] ifdef::installing-3-4,installing-mtc[] -You configure a Google Cloud Platform (GCP) storage bucket as a replication repository for the {mtc-first}.
+You configure a {gcp-first} storage bucket as a replication repository for the {mtc-first}. endif::[] ifdef::installing-oadp-gcp[] -You configure Google Cloud Platform (GCP) for the OpenShift API for Data Protection (OADP). +You configure {gcp-first} for the OpenShift API for Data Protection (OADP). endif::[] .Prerequisites @@ -21,7 +21,7 @@ endif::[] * You must have the `gcloud` and `gsutil` CLI tools installed. See the link:https://cloud.google.com/sdk/docs/[Google cloud documentation] for details. ifdef::installing-3-4,installing-mtc[] -* The GCP storage bucket must be accessible to the source and target clusters. +* The {gcp-short} storage bucket must be accessible to the source and target clusters. * If you are using the snapshot copy method: ** The source and target clusters must be in the same region. ** The source and target clusters must have the same storage class. @@ -30,7 +30,7 @@ endif::[] .Procedure -. Log in to GCP: +. Log in to {gcp-short}: + [source,terminal] ---- @@ -139,9 +139,9 @@ $ gcloud iam service-accounts keys create credentials-velero \ ---- ifdef::installing-3-4,installing-mtc[] + -You use the `credentials-velero` file to add GCP as a replication repository. +You use the `credentials-velero` file to add {gcp-short} as a replication repository. endif::[] ifdef::installing-oadp-gcp[] + -You use the `credentials-velero` file to create a `Secret` object for GCP before you install the Data Protection Application. +You use the `credentials-velero` file to create a `Secret` object for {gcp-short} before you install the Data Protection Application. 
endif::[] diff --git a/modules/minimum-required-permissions-ipi-gcp-provided-sas.adoc b/modules/minimum-required-permissions-ipi-gcp-provided-sas.adoc index c1acd5c846dc..a697100ebcfe 100644 --- a/modules/minimum-required-permissions-ipi-gcp-provided-sas.adoc +++ b/modules/minimum-required-permissions-ipi-gcp-provided-sas.adoc @@ -4,14 +4,14 @@ :_mod-docs-content-type: PROCEDURE [id="minimum-required-permissions-ipi-gcp-provided-sas_{context}"] -= Required GCP permissions for user-provided service accounts += Required {gcp-short} permissions for user-provided service accounts When you are installing a cluster, the compute and control plane nodes require their own service accounts. By default, the installation program creates a service account for the control plane and compute nodes. -The service account that the installation program uses requires the roles and permissions that are listed in the _Creating a service account in GCP_ section, as well as the `resourcemanager.projects.getIamPolicy` and `resourcemanager.projects.setIamPolicy` permissions. +The service account that the installation program uses requires the roles and permissions that are listed in the _Creating a service account in {gcp-full}_ section, as well as the `resourcemanager.projects.getIamPolicy` and `resourcemanager.projects.setIamPolicy` permissions. These permissions should be applied to the service account in the host project. If this approach does not meet the security requirements of your organization, you can provide a service account email address for the control plane or compute nodes in the `install-config.yaml` file. -For more information, see the _Installation configuration parameters for GCP_ page. +For more information, see the _Installation configuration parameters for {gcp-full}_ page. If you provide a service account for control plane nodes during an installation into a shared VPC, you must grant that service account the `roles/compute.networkUser` role in the host project. 
If you want the installation program to automatically create firewall rules when you supply the control plane service account, you must grant that service account the `roles/compute.networkAdmin` and `roles/compute.securityAdmin` roles in the host project. If you only supply the `roles/compute.networkUser` role, you must create the firewall rules manually. diff --git a/modules/minimum-required-permissions-ipi-gcp-xpn.adoc b/modules/minimum-required-permissions-ipi-gcp-xpn.adoc index 07c74c0329da..3146e3482560 100644 --- a/modules/minimum-required-permissions-ipi-gcp-xpn.adoc +++ b/modules/minimum-required-permissions-ipi-gcp-xpn.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="minimum-required-permissions-ipi-gcp-xpn_{context}"] -= Required GCP permissions for shared VPC installations += Required {gcp-short} permissions for shared VPC installations When you are installing a cluster to a link:https://cloud.google.com/vpc/docs/shared-vpc[shared VPC], you must configure the service account for both the host project and the service project. If you are not installing to a shared VPC, you can skip this section. diff --git a/modules/minimum-required-permissions-ipi-gcp.adoc b/modules/minimum-required-permissions-ipi-gcp.adoc index 4c5dc727f610..c68493dc3510 100644 --- a/modules/minimum-required-permissions-ipi-gcp.adoc +++ b/modules/minimum-required-permissions-ipi-gcp.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: REFERENCE [id="minimum-required-permissions-ipi-gcp_{context}"] -= Required GCP permissions for installer-provisioned infrastructure += Required {gcp-short} permissions for installer-provisioned infrastructure When you attach the `Owner` role to the service account that you create, you grant that service account all permissions, including those that are required to install {product-title}. 
@@ -162,7 +162,7 @@ If your organization's security policies require a more restrictive set of permi * `compute.regionHealthChecks.useReadOnly` ==== -.Required permissions to get GCP zone and region related information +.Required permissions to get {gcp-short} zone and region related information [%collapsible] ==== * `compute.globalOperations.get` diff --git a/modules/minimum-required-permissions-upi-gcp.adoc b/modules/minimum-required-permissions-upi-gcp.adoc index 1af606bb7e44..425d1a680cf8 100644 --- a/modules/minimum-required-permissions-upi-gcp.adoc +++ b/modules/minimum-required-permissions-upi-gcp.adoc @@ -5,7 +5,7 @@ // * installing/installing_gcp/installing-gcp-user-infra-vpc.adoc [id="minimum-required-permissions-upi-gcp_{context}"] -= Required GCP permissions for user-provisioned infrastructure += Required {gcp-short} permissions for user-provisioned infrastructure When you attach the `Owner` role to the service account that you create, you grant that service account all permissions, including those that are required to install {product-title}. @@ -161,7 +161,7 @@ If your organization’s security policies require a more restrictive set of per * `compute.regionHealthChecks.useReadOnly` ==== -.Required permissions to get GCP zone and region related information +.Required permissions to get {gcp-short} zone and region related information [%collapsible] ==== * `compute.globalOperations.get` diff --git a/modules/monitoring-common-terms.adoc b/modules/monitoring-common-terms.adoc index eeafa21f76d0..99c7d6d3650c 100644 --- a/modules/monitoring-common-terms.adoc +++ b/modules/monitoring-common-terms.adoc @@ -82,7 +82,7 @@ ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] {product-title} supports many types of storage, both for on-premise and cloud providers. endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ifdef::openshift-dedicated[] -{product-title} supports many types of storage on AWS and GCP. 
+{product-title} supports many types of storage on AWS and {gcp-short}. endif::openshift-dedicated[] ifdef::openshift-rosa,openshift-rosa-hcp[] {product-title} supports many types of storage on AWS. diff --git a/modules/multi-architecture-modify-machine-set-gcp.adoc b/modules/multi-architecture-modify-machine-set-gcp.adoc index dc62ba97ae44..efd631c51a2b 100644 --- a/modules/multi-architecture-modify-machine-set-gcp.adoc +++ b/modules/multi-architecture-modify-machine-set-gcp.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="multi-architecture-modify-machine-set-gcp_{context}"] -= Adding a multi-architecture compute machine set to your GCP cluster += Adding a multi-architecture compute machine set to your {gcp-short} cluster After creating a multi-architecture cluster, you can add nodes with different architectures. @@ -18,7 +18,7 @@ include::snippets/about-multiarch-tuning-operator.adoc[] .Prerequisites * You installed the {oc-first}. -* You used the installation program to create a 64-bit x86 or 64-bit ARM single-architecture GCP cluster with the multi-architecture installer binary. +* You used the installation program to create a 64-bit x86 or 64-bit ARM single-architecture {gcp-short} cluster with the multi-architecture installer binary. .Procedure @@ -27,7 +27,7 @@ include::snippets/about-multiarch-tuning-operator.adoc[] . Create a YAML file, and add the configuration to create a compute machine set to control the 64-bit ARM or 64-bit x86 compute nodes in your cluster. + -- -.Example `MachineSet` object for a GCP 64-bit ARM or 64-bit x86 compute node +.Example `MachineSet` object for a {gcp-short} 64-bit ARM or 64-bit x86 compute node [source,yaml] ---- apiVersion: machine.openshift.io/v1beta1 @@ -125,9 +125,9 @@ Use the `project` and `name` parameters from the output to create the path to im ---- $ projects/<project>/global/images/<image_name> ---- -<4> Optional: Specify custom metadata in the form of a `key:value` pair.
For example use cases, see the GCP documentation for link:https://cloud.google.com/compute/docs/metadata/setting-custom-metadata[setting custom metadata]. -<5> Specify a machine type that aligns with the CPU architecture of the chosen OS image. For more information, see "Tested instance types for GCP on 64-bit ARM infrastructures". -<6> Specify the name of the GCP project that you use for your cluster. +<4> Optional: Specify custom metadata in the form of a `key:value` pair. For example use cases, see the {gcp-short} documentation for link:https://cloud.google.com/compute/docs/metadata/setting-custom-metadata[setting custom metadata]. +<5> Specify a machine type that aligns with the CPU architecture of the chosen OS image. For more information, see "Tested instance types for {gcp-full} on 64-bit ARM infrastructures". +<6> Specify the name of the {gcp-short} project that you use for your cluster. <7> Specify the region. For example, `us-central1`. Ensure that the zone you select has machines with the required architecture. -- diff --git a/modules/nodes-nodes-rtkernel-arguments.adoc b/modules/nodes-nodes-rtkernel-arguments.adoc index f88195f787bf..f3a021ff8bc0 100644 --- a/modules/nodes-nodes-rtkernel-arguments.adoc +++ b/modules/nodes-nodes-rtkernel-arguments.adoc @@ -15,7 +15,7 @@ If your {product-title} workloads require these real-time characteristics, you c * Currently, real-time kernel is supported only on worker nodes, and only for radio access network (RAN) use. * The following procedure is fully supported with bare metal installations that use systems that are certified for Red Hat Enterprise Linux for Real Time 8. * Real-time support in {product-title} is limited to specific subscriptions. -* The following procedure is also supported for use with Google Cloud Platform. +* The following procedure is also supported for use with {gcp-full}. .Prerequisites * Have a running {product-title} cluster (version 4.4 or later). 
diff --git a/modules/nvidia-gpu-csps.adoc b/modules/nvidia-gpu-csps.adoc index bd51849d9580..4a4b1f2e382f 100644 --- a/modules/nvidia-gpu-csps.adoc +++ b/modules/nvidia-gpu-csps.adoc @@ -12,7 +12,7 @@ ifdef::openshift-rosa,openshift-dedicated[] endif::openshift-rosa,openshift-dedicated[] ifndef::openshift-dedicated,openshift-rosa[] -You can deploy {product-title} to one of the major cloud service providers (CSPs): Amazon Web Services ({aws-short}), Google Cloud Platform ({gcp-short}), or Microsoft Azure. +You can deploy {product-title} to one of the major cloud service providers (CSPs): Amazon Web Services ({aws-short}), {gcp-full} ({gcp-short}), or Microsoft Azure. Two modes of operation are available: a fully managed deployment and a self-managed deployment. diff --git a/modules/nvidia-gpu-gcp-adding-a-gpu-node.adoc b/modules/nvidia-gpu-gcp-adding-a-gpu-node.adoc index 85614d1ebcfb..b4ea66eb26eb 100644 --- a/modules/nvidia-gpu-gcp-adding-a-gpu-node.adoc +++ b/modules/nvidia-gpu-gcp-adding-a-gpu-node.adoc @@ -6,7 +6,7 @@ [id="nvidia-gpu-gcp-adding-a-gpu-node_{context}"] = Adding a GPU node to an existing {product-title} cluster -You can copy and modify a default compute machine set configuration to create a GPU-enabled machine set and machines for the GCP cloud provider. +You can copy and modify a default compute machine set configuration to create a GPU-enabled machine set and machines for the {gcp-short} cloud provider. The following table lists the validated instance types: @@ -144,7 +144,7 @@ onHostMaintenance: Terminate } ---- -. View the existing nodes, machines, and machine sets by running the following command. Note that each node is an instance of a machine definition with a specific GCP region and {product-title} role. +. View the existing nodes, machines, and machine sets by running the following command. Note that each node is an instance of a machine definition with a specific {gcp-short} region and {product-title} role. 
+ [source,terminal] ---- @@ -165,7 +165,7 @@ myclustername-2pt9p-worker-c-6pbg6.c.openshift-qe.internal Ready work myclustername-2pt9p-worker-gpu-a-wxcr6.c.openshift-qe.internal Ready worker 4h35m v1.33.4 ---- -. View the machines and machine sets that exist in the `openshift-machine-api` namespace by running the following command. Each compute machine set is associated with a different availability zone within the GCP region. The installer automatically load balances compute machines across availability zones. +. View the machines and machine sets that exist in the `openshift-machine-api` namespace by running the following command. Each compute machine set is associated with a different availability zone within the {gcp-short} region. The installer automatically load balances compute machines across availability zones. + [source,terminal] ---- diff --git a/modules/nw-control-dns-records-public-managed-zone-gcp.adoc b/modules/nw-control-dns-records-public-managed-zone-gcp.adoc index 9bba67582697..1e59b06970d1 100644 --- a/modules/nw-control-dns-records-public-managed-zone-gcp.adoc +++ b/modules/nw-control-dns-records-public-managed-zone-gcp.adoc @@ -5,9 +5,9 @@ :_mod-docs-content-type: PROCEDURE [id="nw-control-dns-records-public-managed-zone-gcp_{context}"] -= Creating DNS records on a public managed zone for GCP += Creating DNS records on a public managed zone for {gcp-full} -You can create DNS records on a public managed zone for GCP by using the External DNS Operator. +You can create DNS records on a public managed zone for {gcp-short} by using the External DNS Operator. .Prerequisites @@ -92,7 +92,7 @@ spec: <5> Defines the provider type. <6> You can define options for the source of DNS records. <7> If the source type is `OpenShiftRoute`, you can pass the OpenShift Ingress Controller name. External DNS selects the canonical hostname of that router as the target while creating CNAME record. -<8> Defines the `route` resource as the source for GCP DNS records. 
+<8> Defines the `route` resource as the source for {gcp-short} DNS records. . Check the DNS records created for {product-title} routes by running the following command: + diff --git a/modules/nw-egress-ips-about.adoc b/modules/nw-egress-ips-about.adoc index c6a4b10bce28..6eed7c7a1334 100644 --- a/modules/nw-egress-ips-about.adoc +++ b/modules/nw-egress-ips-about.adoc @@ -42,7 +42,7 @@ The Egress IP address feature that runs on a primary host network is supported o | VMware vSphere | Yes | {rh-openstack-first} | Yes | Amazon Web Services (AWS) | Yes -| Google Cloud Platform (GCP) | Yes +| {gcp-first} | Yes | Microsoft Azure | Yes | {ibm-z-name} and {ibm-linuxone-name} | Yes | {ibm-z-name} and {ibm-linuxone-name} for {op-system-base-full} KVM | Yes @@ -89,9 +89,9 @@ To confirm the IP capacity and subnets for any node in your public cloud environ The annotation value is an array with a single object with fields that provide the following information for the primary network interface: -* `interface`: Specifies the interface ID on AWS and Azure and the interface name on GCP. +* `interface`: Specifies the interface ID on AWS and Azure and the interface name on {gcp-short}. * `ifaddr`: Specifies the subnet mask for one or both IP address families. -* `capacity`: Specifies the IP address capacity for the node. On AWS, the IP address capacity is provided per IP address family. On Azure and GCP, the IP address capacity includes both IPv4 and IPv6 addresses. +* `capacity`: Specifies the IP address capacity for the node. On AWS, the IP address capacity is provided per IP address family. On Azure and {gcp-short}, the IP address capacity includes both IPv4 and IPv6 addresses. Automatic attachment and detachment of egress IP addresses for traffic between nodes are available. This allows for traffic from many pods in namespaces to have a consistent source IP address to locations outside of the cluster. 
@@ -116,7 +116,7 @@ cloud.network.openshift.io/egress-ipconfig: [ ---- ifndef::openshift-rosa[] -.Example `cloud.network.openshift.io/egress-ipconfig` annotation on GCP +.Example `cloud.network.openshift.io/egress-ipconfig` annotation on {gcp-full} [source,yaml] ---- cloud.network.openshift.io/egress-ipconfig: [ @@ -143,9 +143,9 @@ On AWS, constraints on IP address assignments depend on the instance type config ifndef::openshift-rosa[] [id="nw-egress-ips-capacity-gcp_{context}"] -=== Google Cloud Platform (GCP) IP address capacity limits +=== {gcp-first} IP address capacity limits -On GCP, the networking model implements additional node IP addresses through IP address aliasing, rather than IP address assignments. However, IP address capacity maps directly to IP aliasing capacity. +On {gcp-short}, the networking model implements additional node IP addresses through IP address aliasing, rather than IP address assignments. However, IP address capacity maps directly to IP aliasing capacity. The following capacity limits exist for IP aliasing assignment: diff --git a/modules/nw-external-dns-operator-configuration-parameters.adoc b/modules/nw-external-dns-operator-configuration-parameters.adoc index c7be49f55afc..0ae1923472c5 100644 --- a/modules/nw-external-dns-operator-configuration-parameters.adoc +++ b/modules/nw-external-dns-operator-configuration-parameters.adoc @@ -24,7 +24,7 @@ spec: credentials: name: aws-access-key <2> ---- -<1> Defines available options such as AWS, GCP, Azure, and Infoblox. +<1> Defines available options such as AWS, {gcp-short}, Azure, and Infoblox. <2> Defines a secret name for your cloud provider. 
|`zones` diff --git a/modules/nw-gcp-installing-global-access-configuration.adoc b/modules/nw-gcp-installing-global-access-configuration.adoc index bdbcc755b615..e4f5d328ca0e 100644 --- a/modules/nw-gcp-installing-global-access-configuration.adoc +++ b/modules/nw-gcp-installing-global-access-configuration.adoc @@ -5,8 +5,8 @@ :_mod-docs-content-type: PROCEDURE [id="nw-gcp-global-access-configuration_{context}"] -= Create an Ingress Controller with global access on GCP -You can create an Ingress Controller that has global access to a Google Cloud Platform (GCP) cluster. Global access is only available to Ingress Controllers using internal load balancers. += Create an Ingress Controller with global access on {gcp-first} +You can create an Ingress Controller that has global access to a {gcp-full} cluster. Global access is only available to Ingress Controllers using internal load balancers. .Prerequisites @@ -14,7 +14,7 @@ You can create an Ingress Controller that has global access to a Google Cloud Pl .Procedure -Create an Ingress Controller with global access on a new GCP cluster. +Create an Ingress Controller with global access on a new {gcp-short} cluster. . Change to the directory that contains the installation program and create a manifest file: + diff --git a/modules/nw-ingress-controller-configuration-gcp-global-access.adoc b/modules/nw-ingress-controller-configuration-gcp-global-access.adoc index 87ad7e968abc..e407fa779cb7 100644 --- a/modules/nw-ingress-controller-configuration-gcp-global-access.adoc +++ b/modules/nw-ingress-controller-configuration-gcp-global-access.adoc @@ -4,15 +4,15 @@ :_mod-docs-content-type: PROCEDURE [id="nw-ingress-controller-configuration-gcp-global-access_{context}"] -= Configuring global access for an Ingress Controller on GCP += Configuring global access for an Ingress Controller on {gcp-full} -An Ingress Controller created on GCP with an internal load balancer generates an internal IP address for the service. 
A cluster administrator can specify the global access option, which enables clients in any region within the same VPC network and compute region as the load balancer, to reach the workloads running on your cluster. +An Ingress Controller created on {gcp-short} with an internal load balancer generates an internal IP address for the service. A cluster administrator can specify the global access option, which enables clients in any region within the same VPC network and compute region as the load balancer, to reach the workloads running on your cluster. -For more information, see the GCP documentation for link:https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access[global access]. +For more information, see the {gcp-short} documentation for link:https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access[global access]. .Prerequisites -* You deployed an {product-title} cluster on GCP infrastructure. +* You deployed an {product-title} cluster on {gcp-short} infrastructure. * You configured an Ingress Controller to use an internal load balancer. * You installed the OpenShift CLI (`oc`). @@ -58,4 +58,4 @@ $ oc -n openshift-ingress-operator edit ingresscontroller/default $ oc -n openshift-ingress edit svc/router-default -o yaml ---- + -The output shows that global access is enabled for GCP with the annotation, `networking.gke.io/internal-load-balancer-allow-global-access`. +The output shows that global access is enabled for {gcp-short} with the annotation, `networking.gke.io/internal-load-balancer-allow-global-access`. 
diff --git a/modules/nw-ingress-controller-configuration-parameters.adoc b/modules/nw-ingress-controller-configuration-parameters.adoc index 640dff992f9d..49eb611ca668 100644 --- a/modules/nw-ingress-controller-configuration-parameters.adoc +++ b/modules/nw-ingress-controller-configuration-parameters.adoc @@ -34,7 +34,7 @@ If empty, the default value is `ingress.config.openshift.io/cluster` `.spec.doma For cloud environments, use the `loadBalancer` field to configure the endpoint publishing strategy for your Ingress Controller. ifndef::openshift-rosa,openshift-dedicated[] -On GCP, AWS, and Azure you can configure the following `endpointPublishingStrategy` fields: +On {gcp-short}, AWS, and Azure you can configure the following `endpointPublishingStrategy` fields: endif::openshift-rosa,openshift-dedicated[] ifdef::openshift-rosa,openshift-dedicated[] @@ -50,15 +50,15 @@ ifdef::openshift-rosa,openshift-dedicated[] * Amazon Web Services (AWS): `LoadBalancerService` (with External scope) endif::openshift-rosa,openshift-dedicated[] ifdef::openshift-dedicated[] -* Google Cloud Platform (GCP): `LoadBalancerService` (with External scope) +* {gcp-first}: `LoadBalancerService` (with External scope) endif::openshift-dedicated[] ifndef::openshift-rosa,openshift-dedicated[] * Azure: `LoadBalancerService` (with External scope) -* Google Cloud Platform (GCP): `LoadBalancerService` (with External scope) +* {gcp-first}: `LoadBalancerService` (with External scope) endif::openshift-rosa,openshift-dedicated[] ifndef::openshift-rosa[] -For most platforms, the `endpointPublishingStrategy` value can be updated. On GCP, you can configure the following `endpointPublishingStrategy` fields: +For most platforms, the `endpointPublishingStrategy` value can be updated. 
On {gcp-short}, you can configure the following `endpointPublishingStrategy` fields: * `loadBalancer.scope` * `loadbalancer.providerParameters.gcp.clientAccess` diff --git a/modules/nw-udn-limitations.adoc b/modules/nw-udn-limitations.adoc index 98359a4bc12e..3584a84e3f88 100644 --- a/modules/nw-udn-limitations.adoc +++ b/modules/nw-udn-limitations.adoc @@ -34,6 +34,6 @@ While user-defined networks (UDN) offer highly customizable network configuratio ** Egress IP does not work without a default gateway. -** Egress IP does not work on Google Cloud Platform (GCP). +** Egress IP does not work on {gcp-first}. ** Egress IP does not work with multiple gateways and instead will forward all traffic to a single gateway. \ No newline at end of file diff --git a/modules/oadp-1-5-1-release-notes.adoc b/modules/oadp-1-5-1-release-notes.adoc index 0f8b1489f813..d560ce5ab75a 100644 --- a/modules/oadp-1-5-1-release-notes.adoc +++ b/modules/oadp-1-5-1-release-notes.adoc @@ -135,9 +135,9 @@ When the `ephemeral-storage` parameter is configured and running file system res link:https://issues.redhat.com/browse/OADP-4855[OADP-4855] -*GCP VSL backups fail with Workload Identity because of invalid project configuration* +*{gcp-full} VSL backups fail with Workload Identity because of invalid project configuration* -When performing a `volumeSnapshotLocation` (VSL) backup on GCP Workload Identity, the Velero GCP plugin creates an invalid API request if the GCP project is also specified in the `snapshotLocations` configuration of `DataProtectionApplication` (DPA). As a consequence, the GCP API returns a `RESOURCE_PROJECT_INVALID` error, and the backup job finishes with a `PartiallyFailed` status. No known workaround exists. 
+When performing a `volumeSnapshotLocation` (VSL) backup on {gcp-short} Workload Identity, the Velero {gcp-short} plugin creates an invalid API request if the {gcp-short} project is also specified in the `snapshotLocations` configuration of `DataProtectionApplication` (DPA). As a consequence, the {gcp-short} API returns a `RESOURCE_PROJECT_INVALID` error, and the backup job finishes with a `PartiallyFailed` status. No known workaround exists. link:https://issues.redhat.com/browse/OADP-6697[OADP-6697] diff --git a/modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc b/modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc index 24c88e999be1..a41fc19111d7 100644 --- a/modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc +++ b/modules/oadp-backing-and-restoring-from-cluster-to-cluster.adoc @@ -11,7 +11,7 @@ In general, you back up data from one {product-title} cluster and restore it on .Prerequisites -* All relevant prerequisites for backing up and restoring on your platform (for example, AWS, Microsoft Azure, GCP, and so on), especially the prerequisites for the Data Protection Application (DPA), are described in the relevant sections of this guide. +* All relevant prerequisites for backing up and restoring on your platform (for example, AWS, Microsoft Azure, {gcp-short}, and so on), especially the prerequisites for the Data Protection Application (DPA), are described in the relevant sections of this guide. 
.Procedure diff --git a/modules/oadp-configuring-velero-plugins.adoc b/modules/oadp-configuring-velero-plugins.adoc index e1440e616646..00495e6c91e3 100644 --- a/modules/oadp-configuring-velero-plugins.adoc +++ b/modules/oadp-configuring-velero-plugins.adoc @@ -20,7 +20,7 @@ You can install any of the following default Velero cloud provider plugins when * `aws` (Amazon Web Services) ifndef::openshift-rosa,openshift-rosa-hcp[] -* `gcp` (Google Cloud Platform) +* `gcp` ({gcp-full}) * `azure` (Microsoft Azure) endif::openshift-rosa,openshift-rosa-hcp[] * `openshift` (OpenShift Velero plugin) diff --git a/modules/oadp-gcp-wif-cloud-authentication.adoc b/modules/oadp-gcp-wif-cloud-authentication.adoc index 261c4688834c..ae53e4317a52 100644 --- a/modules/oadp-gcp-wif-cloud-authentication.adoc +++ b/modules/oadp-gcp-wif-cloud-authentication.adoc @@ -18,9 +18,9 @@ Workload identity federation handles encrypting and decrypting certificates, ext Google workload identity federation is available for OADP 1.3.x and later. ==== -When backing up volumes, OADP on GCP with Google workload identity federation authentication only supports CSI snapshots. +When backing up volumes, OADP on {gcp-short} with Google workload identity federation authentication only supports CSI snapshots. -OADP on GCP with Google workload identity federation authentication does not support Volume Snapshot Locations (VSL) backups. For more details, see xref:oadp-gcp-wif-known-issues[Google workload identity federation known issues]. +OADP on {gcp-short} with Google workload identity federation authentication does not support Volume Snapshot Locations (VSL) backups. For more details, see xref:oadp-gcp-wif-known-issues[Google workload identity federation known issues]. If you do not use Google workload identity federation cloud authentication, continue to _Installing the Data Protection Application_. 
diff --git a/modules/oadp-s3-compatible-backup-storage-providers.adoc b/modules/oadp-s3-compatible-backup-storage-providers.adoc index e369afc08daf..b37fed72dcd1 100644 --- a/modules/oadp-s3-compatible-backup-storage-providers.adoc +++ b/modules/oadp-s3-compatible-backup-storage-providers.adoc @@ -32,7 +32,7 @@ The following AWS S3 compatible object storage providers are fully supported by ==== The following compatible object storage providers are supported and have their own Velero object store plugins: -* Google Cloud Platform (GCP) +* {gcp-first} * Microsoft Azure ==== diff --git a/modules/oadp-support-backup-data-immutability.adoc b/modules/oadp-support-backup-data-immutability.adoc index 70227e2db50c..3da50b3bec0b 100644 --- a/modules/oadp-support-backup-data-immutability.adoc +++ b/modules/oadp-support-backup-data-immutability.adoc @@ -15,7 +15,7 @@ See the following list for specific cloud provider limitations: * {oadp-short} backups are not supported and might not work as expected when you enable immutability on Azure Storage Blob. -* GCP Cloud storage policy only supports bucket-level immutability. Therefore, it is not feasible to implement it in the GCP environment. +* {gcp-short} storage policy only supports bucket-level immutability. Therefore, it is not feasible to implement it in the {gcp-short} environment. Depending on your storage provider, the immutability options are called differently: @@ -25,6 +25,6 @@ Depending on your storage provider, the immutability options are called differen * Bucket versioning * Write Once Read Many (WORM) buckets -The primary reason for the absence of support for other S3-compatible object storage is that {oadp-short} initially saves the state of a backup as _finalizing_ and then verifies whether any asynchronous operations are in progress. 
+The primary reason for the absence of support for other S3-compatible object storage is that {oadp-short} initially saves the state of a backup as _finalizing_ and then verifies whether any asynchronous operations are in progress. diff --git a/modules/ocm-networking-tab-concept.adoc b/modules/ocm-networking-tab-concept.adoc index f43e331a3dc8..7e31af2b2366 100644 --- a/modules/ocm-networking-tab-concept.adoc +++ b/modules/ocm-networking-tab-concept.adoc @@ -21,6 +21,6 @@ endif::openshift-rosa[] ifndef::openshift-rosa,openshift-rosa-hcp[] [IMPORTANT] ==== -{cluster-manager-first} does not support the networking tab for a Google Cloud Platform (GCP), non-CCS cluster running in a Red Hat GCP project. +{cluster-manager-first} does not support the networking tab for a {gcp-first}, non-CCS cluster running in a Red Hat {gcp-short} project. ==== endif::openshift-rosa,openshift-rosa-hcp[] diff --git a/modules/openshift-architecture-common-terms.adoc b/modules/openshift-architecture-common-terms.adoc index 5405702acd96..ef83c697fe27 100644 --- a/modules/openshift-architecture-common-terms.adoc +++ b/modules/openshift-architecture-common-terms.adoc @@ -139,7 +139,7 @@ A command-line tool to run {product-title} commands on the terminal. ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] OpenShift Dedicated:: -A managed {op-system-base} {product-title} offering on Amazon Web Services (AWS) and Google Cloud Platform (GCP). OpenShift Dedicated focuses on building and scaling applications. +A managed {op-system-base} {product-title} offering on Amazon Web Services (AWS) and {gcp-first}. OpenShift Dedicated focuses on building and scaling applications. 
endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] OpenShift Update Service (OSUS):: diff --git a/modules/osd-create-cluster-ccs-gcp.adoc b/modules/osd-create-cluster-ccs-gcp.adoc index b720b3ce2b17..23c8934ecf3a 100644 --- a/modules/osd-create-cluster-ccs-gcp.adoc +++ b/modules/osd-create-cluster-ccs-gcp.adoc @@ -9,7 +9,7 @@ [id="osd-create-gcp-cluster-ccs1_{context}"] -= Creating a cluster on GCP with CCS += Creating a cluster on {gcp-short} with CCS .Procedure @@ -31,19 +31,19 @@ For more information, contact your sales representative or Red Hat support. .. Select the *Customer Cloud Subscription* infrastructure type to deploy {product-title} in an existing cloud provider account that you own. .. Click *Next*. -. Select *Run on Google Cloud Platform*. +. Select *Run on {gcp-full}*. . Select *Service Account* as the Authentication type. + [NOTE] ==== -Red Hat recommends using Workload Identity Federation as the Authentication type. For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on GCP with Workload Identity Federation authentication]. +Red Hat recommends using Workload Identity Federation as the Authentication type. For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on {gcp-short} with Workload Identity Federation authentication]. ==== + . Review and complete the listed *Prerequisites*. . Select the checkbox to acknowledge that you have read and completed all of the prerequisites. -. Provide your GCP service account private key in JSON format. You can either click *Browse* to locate and attach a JSON file or add the details in the *Service account JSON* field. +. Provide your {gcp-short} service account private key in JSON format. 
You can either click *Browse* to locate and attach a JSON file or add the details in the *Service account JSON* field. . Click *Next* to validate your cloud provider account and go to the *Cluster details* page. @@ -68,7 +68,7 @@ Clusters configured with Private Service Connect (PSC) are only supported on Ope + [IMPORTANT] ==== -To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding GCP organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. +To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding {gcp-short} organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. ==== + @@ -148,7 +148,7 @@ If you are using private API endpoints, you cannot access your cluster until you ==== + -. Optional: To install the cluster in an existing GCP Virtual Private Cloud (VPC): +. Optional: To install the cluster in an existing {gcp-short} Virtual Private Cloud (VPC): .. Select *Install into an existing VPC*. + [IMPORTANT] @@ -175,7 +175,7 @@ For more information about custom application ingress settings, click on the inf . Click *Next*. -. Optional: To install the cluster into a GCP Shared VPC: +. Optional: To install the cluster into a {gcp-short} Shared VPC: + [IMPORTANT] ==== @@ -183,7 +183,7 @@ For more information about custom application ingress settings, click on the inf To install a cluster into a Shared VPC, you must use {product-title} version 4.13.15 or later. 
Additionally, the VPC owner of the host project must enable a project as a host project in their Google Cloud console. For more information, see link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#set-up-shared-vpc[Enable a host project]. ==== -.. Select *Install into GCP Shared VPC*. +.. Select *Install into {gcp-short} Shared VPC*. .. Specify the *Host project ID*. If the specified host project ID is incorrect, cluster creation fails. + [IMPORTANT] @@ -194,7 +194,7 @@ For information about Shared VPC permissions, see link:https://cloud.google.com/ ==== + -. If you opted to install the cluster in an existing GCP VPC, provide your *Virtual Private Cloud (VPC) subnet settings* and select *Next*. +. If you opted to install the cluster in an existing {gcp-short} VPC, provide your *Virtual Private Cloud (VPC) subnet settings* and select *Next*. You must have created the Cloud network address translation (NAT) and a Cloud router. See the "Additional resources" section for information about Cloud NATs and Google VPCs. + @@ -258,7 +258,7 @@ By default, clusters are created with the delete protection feature disabled. [NOTE] ==== -If you delete a cluster that was installed into a GCP Shared VPC, inform the VPC owner of the host project to remove the IAM policy roles granted to the service account that was referenced during cluster creation. +If you delete a cluster that was installed into a {gcp-short} Shared VPC, inform the VPC owner of the host project to remove the IAM policy roles granted to the service account that was referenced during cluster creation. ==== diff --git a/modules/osd-create-cluster-ccs.adoc b/modules/osd-create-cluster-ccs.adoc index f56e0557535b..3f19428b0c65 100644 --- a/modules/osd-create-cluster-ccs.adoc +++ b/modules/osd-create-cluster-ccs.adoc @@ -19,7 +19,7 @@ [NOTE] ==== The subscription types that are available to you depend on your {product-title} subscriptions and resource quotas. 
-Red Hat recommends deploying your cluster with the On-Demand subscription type purchased through the {GCP} Marketplace. This option provides flexible, consumption-based billing, consuming additional capacity is frictionless, and no Red Hat intervention is required. +Red Hat recommends deploying your cluster with the On-Demand subscription type purchased through the {gcp-full} Marketplace. This option provides flexible, consumption-based billing, consuming additional capacity is frictionless, and no Red Hat intervention is required. For more information, contact your sales representative or Red Hat support. ==== @@ -27,18 +27,18 @@ For more information, contact your sales representative or Red Hat support. .. Select the *Customer Cloud Subscription* infrastructure type to deploy {product-title} in an existing cloud provider account that you own. .. Click *Next*. -. Select *Run on Google Cloud Platform*. +. Select *Run on {gcp-full}*. . Select *Service Account* as the Authentication type. + [NOTE] ==== -Red Hat recommends using Workload Identity Federation as the Authentication type. For more information, see _Creating a cluster on GCP with Workload Identity Federation authentication_ in the _Additional resources_ section. +Red Hat recommends using Workload Identity Federation as the Authentication type. For more information, see _Creating a cluster on {gcp-short} with Workload Identity Federation authentication_ in the _Additional resources_ section. ==== + . Review and complete the listed *Prerequisites*. . Select the checkbox to acknowledge that you have read and completed all of the prerequisites. -. Provide your GCP service account private key in JSON format. You can either click *Browse* to locate and attach a JSON file or add the details in the *Service account JSON* field. +. Provide your {gcp-short} service account private key in JSON format. 
You can either click *Browse* to locate and attach a JSON file or add the details in the *Service account JSON* field. . Click *Next* to validate your cloud provider account and go to the *Cluster details* page. @@ -70,7 +70,7 @@ Clusters configured with Private Service Connect (PSC) are only supported on Ope + [IMPORTANT] ==== -To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding GCP organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. +To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding {gcp-short} organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. ==== // + // [IMPORTANT] @@ -80,7 +80,7 @@ To successfully create a cluster, you must select *Enable Secure Boot support fo + [IMPORTANT] ==== -*Enable Secure Boot support for Shielded VMs* is not supported for {product-title} on {GCP} clusters created using bare-metal instance types. For more information, see link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#limitations[Limitations] in the Google Cloud documentation. +*Enable Secure Boot support for Shielded VMs* is not supported for {product-title} on {gcp-full} clusters created using bare-metal instance types. For more information, see link:https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#limitations[Limitations] in the {gcp-short} documentation. ==== + .. 
Leave *Enable user workload monitoring* selected to monitor your own projects in isolation from Red Hat Site Reliability Engineer (SRE) platform metrics. This option is enabled by default. @@ -159,7 +159,7 @@ If you are using private API endpoints, you cannot access your cluster until you ==== + -. Optional: To install the cluster in an existing GCP Virtual Private Cloud (VPC): +. Optional: To install the cluster in an existing {gcp-short} Virtual Private Cloud (VPC): + -- include::snippets/install-cluster-in-vpc.adoc[] @@ -190,7 +190,7 @@ For more information about custom application ingress settings, click on the inf + . Click *Next*. -. Optional: To install the cluster into a GCP Shared VPC: +. Optional: To install the cluster into a {gcp-short} Shared VPC: + [IMPORTANT] ==== @@ -198,7 +198,7 @@ For more information about custom application ingress settings, click on the inf To install a cluster into a Shared VPC, you must use {product-title} version 4.13.15 or later. Additionally, the VPC owner of the host project must enable a project as a host project in their Google Cloud console. For more information, see link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#set-up-shared-vpc[Enable a host project]. ==== -.. Select *Install into GCP Shared VPC*. +.. Select *Install into {gcp-short} Shared VPC*. .. Specify the *Host project ID*. If the specified host project ID is incorrect, cluster creation fails. + [IMPORTANT] @@ -209,7 +209,7 @@ For information about Shared VPC permissions, see link:https://cloud.google.com/ ==== + -. If you opted to install the cluster in an existing GCP VPC, provide your *Virtual Private Cloud (VPC) subnet settings* and select *Next*. +. If you opted to install the cluster in an existing {gcp-short} VPC, provide your *Virtual Private Cloud (VPC) subnet settings* and select *Next*. You must have created the Cloud network address translation (NAT) and a Cloud router. 
See the "Additional resources" section for information about Cloud NATs and Google VPCs. + @@ -273,7 +273,7 @@ By default, clusters are created with the delete protection feature disabled. [NOTE] ==== -If you delete a cluster that was installed into a GCP Shared VPC, inform the VPC owner of the host project to remove the IAM policy roles granted to the service account that was referenced during cluster creation. +If you delete a cluster that was installed into a {gcp-short} Shared VPC, inform the VPC owner of the host project to remove the IAM policy roles granted to the service account that was referenced during cluster creation. ==== .Verification @@ -282,5 +282,5 @@ If you delete a cluster that was installed into a GCP Shared VPC, inform the VPC [IMPORTANT] ==== -If your cluster deployment fails during installation, certain resources created during the installation process are not automatically removed from your {GCP} account. To remove these resources from your GCP account, you must delete the failed cluster. +If your cluster deployment fails during installation, certain resources created during the installation process are not automatically removed from your {gcp-short} account. To remove these resources from your {gcp-short} account, you must delete the failed cluster. ==== diff --git a/modules/osd-create-cluster-gcp-account.adoc b/modules/osd-create-cluster-gcp-account.adoc index 3412cef7858c..9f7495bb6ea3 100644 --- a/modules/osd-create-cluster-gcp-account.adoc +++ b/modules/osd-create-cluster-gcp-account.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="osd-create-cluster-gcp-account_{context}"] -= Creating a cluster on GCP with Google Cloud Marketplace += Creating a cluster on {gcp-short} with Google Cloud Marketplace When creating an {product-title} (OSD) cluster on Google Cloud through the OpenShift Cluster Manager Hybrid Cloud Console, customers can select Google Cloud Marketplace as their preferred billing model. 
This billing model allows Red Hat customers to take advantage of their link:https://cloud.google.com/docs/cuds[Google Committed Use Discounts (CUD)] towards {product-title} purchased through the Google Cloud Marketplace. Additionally, OSD pricing is consumption-based and customers are billed directly through their Google Cloud account. @@ -19,7 +19,7 @@ When creating an {product-title} (OSD) cluster on Google Cloud through the OpenS .. From the drop-down menu, select *Google Cloud Marketplace*. .. Select the *Customer Cloud Subscription* infrastructure type. .. Click *Next*. -. On the *Cloud provider* page, select *Run on Google Cloud Platform*. +. On the *Cloud provider* page, select *Run on {gcp-full}*. . Select either *Service account* or *Workload Identity Federation* as the Authentication type. + [NOTE] @@ -29,7 +29,7 @@ For more information about authentication types, click the question icon located + . Review and complete the listed *Prerequisites*. . Select the checkbox to acknowledge that you have read and completed all of the prerequisites. -. If you selected *Service account* as the Authentication type, provide your GCP service account private key in JSON format. You can either click *Browse* to locate and attach a JSON file or add the details in the *Service account JSON* field. +. If you selected *Service account* as the Authentication type, provide your {gcp-short} service account private key in JSON format. You can either click *Browse* to locate and attach a JSON file or add the details in the *Service account JSON* field. . If you selected *Workload Identity Federation* as the Authentication type, you will first need to create a new WIF configuration. Open a terminal window and run the following `ocm` CLI command. 
+ @@ -62,7 +62,7 @@ Workload Identity Federation (WIF) is only supported on {product-title} version + [IMPORTANT] ==== -To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding GCP organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. +To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding {gcp-short} organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. ==== + .. Leave *Enable user workload monitoring* selected to monitor your own projects in isolation from Red Hat Site Reliability Engineer (SRE) platform metrics. This option is enabled by default. @@ -123,7 +123,7 @@ Red Hat recommends using Private Service Connect when deploying a private {produ ==== //Once PSC docs are live add link from note above. + -. Optional: To install the cluster in an existing GCP Virtual Private Cloud (VPC): +. Optional: To install the cluster in an existing {gcp-short} Virtual Private Cloud (VPC): .. Select *Install into an existing VPC*. + @@ -150,7 +150,7 @@ For more information about custom application ingress settings, click on the inf . Click *Next*. -. Optional: To install the cluster into a GCP Shared VPC: +. Optional: To install the cluster into a {gcp-short} Shared VPC: + [IMPORTANT] ==== @@ -158,7 +158,7 @@ For more information about custom application ingress settings, click on the inf To install a cluster into a Shared VPC, you must use {product-title} version 4.13.15 or later. 
Additionally, the VPC owner of the host project must enable a project as a host project in their Google Cloud console. For more information, see link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#set-up-shared-vpc[Enable a host project]. ==== -.. Select *Install into GCP Shared VPC*. +.. Select *Install into {gcp-short} Shared VPC*. .. Specify the *Host project ID*. If the specified host project ID is incorrect, cluster creation fails. + [IMPORTANT] @@ -168,7 +168,7 @@ The VPC owner of the host project has 30 days to grant the listed permissions be For information about Shared VPC permissions, see link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#migs-service-accounts[Provision Shared VPC]. ==== + -. If you opted to install the cluster in an existing GCP VPC, provide your *Virtual Private Cloud (VPC) subnet settings* and select *Next*. +. If you opted to install the cluster in an existing {gcp-short} VPC, provide your *Virtual Private Cloud (VPC) subnet settings* and select *Next*. + [NOTE] ==== diff --git a/modules/osd-create-cluster-red-hat-account.adoc b/modules/osd-create-cluster-red-hat-account.adoc index 543f27cc437b..1c56b9084895 100644 --- a/modules/osd-create-cluster-red-hat-account.adoc +++ b/modules/osd-create-cluster-red-hat-account.adoc @@ -5,7 +5,7 @@ :_mod-docs-content-type: PROCEDURE [id="osd-create-gcp-cluster-ccs_{context}"] -= Creating a cluster on GCP with a Red Hat cloud account using {cluster-manager} += Creating a cluster on {gcp-short} with a Red Hat cloud account using {cluster-manager} Through {cluster-manager-url}, you can create an {product-title} cluster on {GCP} using a standard cloud provider account owned by Red Hat. @@ -27,7 +27,7 @@ You must have the required resource quota for the *Annual* subscription type to + .. Select the *Red Hat cloud account* infrastructure type to deploy {product-title} in a cloud provider account that is owned by Red Hat. .. Click *Next*. -. 
Select *Run on Google Cloud Platform* and click *Next*. +. Select *Run on {gcp-full}* and click *Next*. . On the *Cluster details* page, provide a name for your cluster and specify the cluster details: .. Add a *Cluster name*. .. Optional: Cluster creation generates a domain prefix as a subdomain for your provisioned cluster on `openshiftapps.com`. If the cluster name is less than or equal to 15 characters, that name is used for the domain prefix. If the cluster name is longer than 15 characters, the domain prefix is randomly generated as a 15-character string. @@ -52,7 +52,7 @@ To customize the subdomain, select the *Create custom domain prefix* checkbox, a + [IMPORTANT] ==== -To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding GCP organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. +To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding {gcp-short} organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. ==== + [IMPORTANT] @@ -135,5 +135,5 @@ By default, clusters are created with the delete protection feature disabled. [IMPORTANT] ==== -If your cluster deployment fails during installation, certain resources created during the installation process are not automatically removed from your {GCP} account. To remove these resources from your GCP account, you must delete the failed cluster. 
+If your cluster deployment fails during installation, certain resources created during the installation process are not automatically removed from your {GCP} account. To remove these resources from your {gcp-short} account, you must delete the failed cluster. ==== \ No newline at end of file diff --git a/modules/osd-create-cluster-rhm-gcp-account.adoc b/modules/osd-create-cluster-rhm-gcp-account.adoc index d620eb85b73e..2fd2b053e5a4 100644 --- a/modules/osd-create-cluster-rhm-gcp-account.adoc +++ b/modules/osd-create-cluster-rhm-gcp-account.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: PROCEDURE [id="osd-create-cluster-rhm-gcp-account_{context}"] -= Creating a cluster on GCP with Red Hat Marketplace += Creating a cluster on {gcp-short} with Red Hat Marketplace When creating an {product-title} (OSD) cluster on Google Cloud through the OpenShift Cluster Manager Hybrid Cloud Console, customers can select Red Hat Marketplace as their preferred billing model. OSD pricing is consumption-based and customers are billed directly through their Red Hat Marketplace account. @@ -19,7 +19,7 @@ OSD pricing is consumption-based and customers are billed directly through their .. Select the *On-Demand* subscription type. .. From the drop-down menu, select *Red Hat Marketplace*. .. Click *Next*. -. On the *Cloud provider* page, select *Run on Google Cloud Platform*. +. On the *Cloud provider* page, select *Run on {gcp-full}*. . Select either *Service account* or *Workload Identity Federation* as the Authentication type. + [NOTE] @@ -29,7 +29,7 @@ For more information about authentication types, click the question icon located + . Review and complete the listed *Prerequisites*. . Select the checkbox to acknowledge that you have read and completed all of the prerequisites. -. If you selected *Service account* as the Authentication type, provide your GCP service account private key in JSON format. 
You can either click *Browse* to locate and attach a JSON file or add the details in the *Service account JSON* field. +. If you selected *Service account* as the Authentication type, provide your {gcp-short} service account private key in JSON format. You can either click *Browse* to locate and attach a JSON file or add the details in the *Service account JSON* field. . If you selected *Workload Identity Federation* as the Authentication type, you will first need to create a new WIF configuration. Open a terminal window and run the following `ocm` CLI command. + @@ -62,7 +62,7 @@ Workload Identity Federation (WIF) is only supported on {product-title} version + [IMPORTANT] ==== -To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding GCP organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. +To successfully create a cluster, you must select *Enable Secure Boot support for Shielded VMs* if your organization has the policy constraint `constraints/compute.requireShieldedVm` enabled. For more information regarding {gcp-short} organizational policy constraints, see link:https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints[Organization policy constraints]. ==== + .. Leave *Enable user workload monitoring* selected to monitor your own projects in isolation from Red Hat Site Reliability Engineer (SRE) platform metrics. This option is enabled by default. @@ -123,7 +123,7 @@ Red Hat recommends using Private Service Connect when deploying a private {produ ==== //Once PSC docs are live add link from note above. + -. Optional: To install the cluster in an existing GCP Virtual Private Cloud (VPC): +. 
Optional: To install the cluster in an existing {gcp-short} Virtual Private Cloud (VPC): .. Select *Install into an existing VPC*. + @@ -150,14 +150,14 @@ For more information about custom application ingress settings, click on the inf . Click *Next*. -. Optional: To install the cluster into a GCP shared VPC: +. Optional: To install the cluster into a {gcp-short} shared VPC: + [IMPORTANT] ==== -To install a cluster into a GCP shared VPC, you must use {product-title} version 4.13.15 or later. Additionally, the VPC owner of the host project must enable a project as a host project in their Google Cloud console. For more information, see link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#set-up-shared-vpc[Enable a host project]. +To install a cluster into a {gcp-short} shared VPC, you must use {product-title} version 4.13.15 or later. Additionally, the VPC owner of the host project must enable a project as a host project in their Google Cloud console. For more information, see link:https://cloud.google.com/vpc/docs/provisioning-shared-vpc#set-up-shared-vpc[Enable a host project]. ==== -.. Select *Install into GCP Shared VPC*. +.. Select *Install into {gcp-short} Shared VPC*. .. Specify the *Host project ID*. If the specified host project ID is incorrect, cluster creation fails. + [IMPORTANT] @@ -172,7 +172,7 @@ For information about Shared VPC permissions, see link:https://cloud.google.com/ [NOTE] ==== -If you are installing a cluster into a GCP Shared VPC, the VPC name and subnets are shared from the host project. +If you are installing a cluster into a {gcp-short} Shared VPC, the VPC name and subnets are shared from the host project. 
==== + diff --git a/modules/osd-gcp-architecture.adoc b/modules/osd-gcp-architecture.adoc index 61ab4087e00b..264d1c7ad5f7 100644 --- a/modules/osd-gcp-architecture.adoc +++ b/modules/osd-gcp-architecture.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: CONCEPT [id="osd-gcp-architecture_{context}"] -= Private {product-title} on {GCP} architecture on public and private networks += Private {product-title} on {gcp-full} architecture on public and private networks You can customize the access patterns for your API server endpoint and Red Hat SRE management by choosing one of the following network configuration types: diff --git a/modules/osd-gcp-psc-firewall-prerequisites.adoc b/modules/osd-gcp-psc-firewall-prerequisites.adoc index a92750f83b9d..cff68b9686c2 100644 --- a/modules/osd-gcp-psc-firewall-prerequisites.adoc +++ b/modules/osd-gcp-psc-firewall-prerequisites.adoc @@ -4,13 +4,13 @@ :_mod-docs-content-type: PROCEDURE [id="osd-gcp-psc-firewall-prerequisites_{context}"] -= GCP firewall prerequisites += {gcp-short} firewall prerequisites -If you are using a firewall to control egress traffic from {product-title} on {GCP}, you must configure your firewall to grant access to certain domains and port combinations listed in the tables below. {product-title} requires this access to provide a fully managed OpenShift service. +If you are using a firewall to control egress traffic from {product-title} on {gcp-full}, you must configure your firewall to grant access to certain domains and port combinations listed in the tables below. {product-title} requires this access to provide a fully managed OpenShift service. [IMPORTANT] ==== -Only {product-title} on {GCP} clusters deployed with Private Service Connect can use a firewall to control egress traffic. +Only {product-title} on {gcp-full} clusters deployed with Private Service Connect can use a firewall to control egress traffic. 
==== // .Prerequisites @@ -162,7 +162,7 @@ Managed clusters require the enabling of telemetry to allow Red Hat to react mor |The SFTP server used by `must-gather-operator` to upload diagnostic logs to help troubleshoot issues with the cluster. |=== -. Add the following URLs for the {GCP} API endpoints to an allowlist: +. Add the following URLs for the {gcp-full} API endpoints to an allowlist: + [cols="6,1,6",options="header"] |=== @@ -170,7 +170,7 @@ Managed clusters require the enabling of telemetry to allow Red Hat to react mor | `accounts.google.com` | 443 -| Used to access your GCP account. +| Used to access your {gcp-short} account. |`*.googleapis.com` @@ -192,7 +192,7 @@ OR `iamcredentials.googleapis.com` | 443 -| Used to access GCP services and resources. Review link:https://cloud.google.com/endpoints/docs[Cloud Endpoints] in the GCP documentation to determine the endpoints to allow for your APIs. +| Used to access {gcp-short} services and resources. Review link:https://cloud.google.com/endpoints/docs[Cloud Endpoints] in the {gcp-short} documentation to determine the endpoints to allow for your APIs. |=== + [NOTE] diff --git a/modules/osd-intro.adoc b/modules/osd-intro.adoc index ea88cf792a26..fd995ea96ebc 100644 --- a/modules/osd-intro.adoc +++ b/modules/osd-intro.adoc @@ -6,11 +6,11 @@ [id="osd-intro_{context}"] = An overview of {product-title} -{product-title} is professionally managed by Red Hat and hosted on {AWS} or {GCP}. Each {product-title} cluster comes with a fully managed link:https://access.redhat.com/documentation/en-us/openshift_container_platform/{ocp-version}/html/architecture/control-plane[control plane] (Control and Infrastructure nodes), application nodes, installation and management by Red Hat Site Reliability Engineers (SRE), premium Red Hat Support, and cluster services such as logging, metrics, monitoring, notifications portal, and a cluster portal. 
+{product-title} is professionally managed by Red Hat and hosted on {AWS} or {gcp-full}. Each {product-title} cluster comes with a fully managed link:https://access.redhat.com/documentation/en-us/openshift_container_platform/{ocp-version}/html/architecture/control-plane[control plane] (Control and Infrastructure nodes), application nodes, installation and management by Red Hat Site Reliability Engineers (SRE), premium Red Hat Support, and cluster services such as logging, metrics, monitoring, notifications portal, and a cluster portal. {product-title} provides enterprise-ready enhancements to Kubernetes, including the following enhancements: -* {product-title} clusters are deployed on AWS or GCP environments and can be used as part of a hybrid approach for application management. +* {product-title} clusters are deployed on AWS or {gcp-short} environments and can be used as part of a hybrid approach for application management. * Integrated Red Hat technology. Major components in {product-title} come from Red Hat Enterprise Linux and related Red Hat technologies. {product-title} benefits from the intense testing and certification initiatives for Red Hat’s enterprise quality software. diff --git a/modules/osd-private-architecture-model-gcp.adoc b/modules/osd-private-architecture-model-gcp.adoc index 549eaa647c03..8bf26555cd33 100644 --- a/modules/osd-private-architecture-model-gcp.adoc +++ b/modules/osd-private-architecture-model-gcp.adoc @@ -4,9 +4,9 @@ :_mod-docs-content-type: CONCEPT [id="osd-private-architecture-model_{context}"] -= Private {product-title} on {GCP} without Private Service Connect (PSC) architecture model += Private {product-title} on {gcp-full} without Private Service Connect (PSC) architecture model -With a private network configuration, your cluster API server endpoint and application routes are private. Private {product-title} on GCP clusters use some public subnets, but no control plane or worker nodes are deployed in public subnets. 
+With a private network configuration, your cluster API server endpoint and application routes are private. Private {product-title} on {gcp-short} clusters use some public subnets, but no control plane or worker nodes are deployed in public subnets. [IMPORTANT] ==== diff --git a/modules/osd-private-psc-architecture-model-gcp.adoc b/modules/osd-private-psc-architecture-model-gcp.adoc index c28b9b1d5e2b..7f2e4da36d2c 100644 --- a/modules/osd-private-psc-architecture-model-gcp.adoc +++ b/modules/osd-private-psc-architecture-model-gcp.adoc @@ -4,10 +4,10 @@ :_mod-docs-content-type: CONCEPT [id="osd-private-psc-architecture-model-gcp_{context}"] -= Private {product-title} on {GCP} with Private Service Connect architecture model += Private {product-title} on {gcp-full} with Private Service Connect architecture model -With a private GCP Private Service Connect (PSC) network configuration, your cluster API server endpoint and application routes are private. Public subnets or NAT gateways are not required in your VPC for egress. -Red Hat SRE management access the cluster over the GCP PSC-enabled private connectivity. The default ingress controller are private. Additional ingress controllers can be public or private. The following diagram shows network connectivity of a private cluster with PSC. +With a private {gcp-short} Private Service Connect (PSC) network configuration, your cluster API server endpoint and application routes are private. Public subnets or NAT gateways are not required in your VPC for egress. +Red Hat SRE management accesses the cluster over the {gcp-full} PSC-enabled private connectivity. The default ingress controller is private. Additional ingress controllers can be public or private. The following diagram shows network connectivity of a private cluster with PSC. 
.{product-title} on {GCP} deployed on a private network with PSC image::484_a_OpenShift_osd_gcp_private_psc_arch_0525.png[Private with PSC architecture model] diff --git a/modules/osd-public-architecture-model-gcp.adoc b/modules/osd-public-architecture-model-gcp.adoc index b9d33da6b82e..36f0b7f3b1d4 100644 --- a/modules/osd-public-architecture-model-gcp.adoc +++ b/modules/osd-public-architecture-model-gcp.adoc @@ -4,7 +4,7 @@ :_mod-docs-content-type: CONCEPT [id="osd-public-architecture-model-gcp_{context}"] -= Public {product-title} on {GCP} architecture model += Public {product-title} on {gcp-full} architecture model With a public network configuration, your cluster API server endpoint and application routes are internet-facing. The default ingress controller can be public or private. The following image shows the network connectivity of a public cluster. diff --git a/modules/osd-understanding-private-service-connect.adoc b/modules/osd-understanding-private-service-connect.adoc index 33c3dd125d2f..7717dc7482ca 100644 --- a/modules/osd-understanding-private-service-connect.adoc +++ b/modules/osd-understanding-private-service-connect.adoc @@ -7,7 +7,7 @@ [id="osd-understanding-private-service-connect_{context}"] = Understanding Private Service Connect -Private Service Connect (PSC), a capability of Google Cloud networking, enables private communication between services across different projects or organizations within GCP. Users that implement PSC as part of their network connectivity can deploy {product-title} clusters in a private and secured environment within {GCP} without any public facing cloud resources. +Private Service Connect (PSC), a capability of Google Cloud networking, enables private communication between services across different projects or organizations within {gcp-short}. 
Users that implement PSC as part of their network connectivity can deploy {product-title} clusters in a private and secured environment within {GCP} without any public facing cloud resources. For more information about PSC, see link:https://cloud.google.com/vpc/docs/private-service-connect[Private Service Connect]. diff --git a/modules/ossm-federation-across-cluster.adoc b/modules/ossm-federation-across-cluster.adoc index 403d8b9b7572..1359ec7cf089 100644 --- a/modules/ossm-federation-across-cluster.adoc +++ b/modules/ossm-federation-across-cluster.adoc @@ -39,8 +39,8 @@ The IP address found in the `.status.loadBalancer.ingress.ip` field of the ingre endif::openshift-rosa,openshift-rosa-hcp,openshift-dedicated[] ifndef::openshift-rosa,openshift-rosa-hcp[] -== Exposing the federation ingress on Google Cloud Platform (GCP) -On Google Cloud Platform, merely setting the service type to `LoadBalancer` suffices for mesh federation to operate correctly. +== Exposing the federation ingress on {gcp-full} +On {gcp-full}, merely setting the service type to `LoadBalancer` suffices for mesh federation to operate correctly. The IP address found in the `.status.loadBalancer.ingress.ip` field of the ingress gateway `Service` object should be specified as one of the entries in the `.spec.remote.addresses` field of the `ServiceMeshPeer` object. endif::openshift-rosa,openshift-rosa-hcp[] diff --git a/modules/ossm-rn-fixed-issues.adoc b/modules/ossm-rn-fixed-issues.adoc index 917a0c15bbcd..c62d2a6ec58b 100644 --- a/modules/ossm-rn-fixed-issues.adoc +++ b/modules/ossm-rn-fixed-issues.adoc @@ -91,7 +91,7 @@ Now, if you deploy a v2.2 or v2.1 of the 'ServiceMeshControlPlane' resource, the * https://issues.redhat.com/browse/OSSM-3595[OSSM-3595] Previously, the `istio-cni` plugin sometimes failed on {op-system-base} because SELinux did not allow the utility `iptables-restore` to open files in the `/tmp` directory. 
Now, SELinux passes `iptables-restore` via `stdin` input stream instead of via a file. -* https://issues.redhat.com/browse/OSSM-3586[OSSM-3586] Previously, Istio proxies were slow to start when Google Cloud Platform (GCP) metadata servers were not available. When you upgrade to Istio 1.14.6, Istio proxies start as expected on GCP, even if metadata servers are not available. +* https://issues.redhat.com/browse/OSSM-3586[OSSM-3586] Previously, Istio proxies were slow to start when {gcp-first} metadata servers were not available. When you upgrade to Istio 1.14.6, Istio proxies start as expected on {gcp-first}, even if metadata servers are not available. * https://issues.redhat.com/browse/OSSM-3025[OSSM-3025] Istiod sometimes fails to become ready. Sometimes, when a mesh contained many member namespaces, the Istiod pod did not become ready due to a deadlock within Istiod. The deadlock is now resolved and the pod now starts as expected. diff --git a/modules/persistent-storage-byok.adoc b/modules/persistent-storage-byok.adoc index 9acee6549418..4c17246c468b 100644 --- a/modules/persistent-storage-byok.adoc +++ b/modules/persistent-storage-byok.adoc @@ -16,6 +16,6 @@ This features supports the following storage types: * Microsoft Azure Disk storage -* Google Cloud Platform (GCP) persistent disk (PD) storage +* {gcp-first} persistent disk (PD) storage * IBM Virtual Private Cloud (VPC) Block storage diff --git a/modules/persistent-storage-csi-drivers-supported.adoc b/modules/persistent-storage-csi-drivers-supported.adoc index aac3baeee244..c3fa67ccce6d 100644 --- a/modules/persistent-storage-csi-drivers-supported.adoc +++ b/modules/persistent-storage-csi-drivers-supported.adoc @@ -12,7 +12,7 @@ To create CSI-provisioned persistent volumes that mount to these supported stora ifndef::openshift-rosa,openshift-rosa-hcp[] [IMPORTANT] ==== -The AWS EFS and GCP Filestore CSI drivers are not installed by default, and must be installed manually. 
For instructions on installing the AWS EFS CSI driver, see link:https://access.redhat.com/documentation/en-us/openshift_dedicated/4/html/storage/using-container-storage-interface-csi#osd-persistent-storage-aws-efs-csi[Setting up AWS Elastic File Service CSI Driver Operator]. For instructions on installing the GCP Filestore CSI driver, see link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html/storage/using-container-storage-interface-csi#persistent-storage-csi-google-cloud-file-overview[Google Compute Platform Filestore CSI Driver Operator]. +The AWS EFS and {gcp-short} Filestore CSI drivers are not installed by default, and must be installed manually. For instructions on installing the AWS EFS CSI driver, see link:https://access.redhat.com/documentation/en-us/openshift_dedicated/4/html/storage/using-container-storage-interface-csi#osd-persistent-storage-aws-efs-csi[Setting up AWS Elastic File Service CSI Driver Operator]. For instructions on installing the {gcp-short} Filestore CSI driver, see link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.13/html/storage/using-container-storage-interface-csi#persistent-storage-csi-google-cloud-file-overview[Google Compute Platform Filestore CSI Driver Operator]. 
==== endif::openshift-rosa,openshift-rosa-hcp[] @@ -49,8 +49,8 @@ endif::openshift-rosa,openshift-rosa-hcp,openshift-aro[] |AWS EBS | ✅ | | | ✅| |AWS EFS | | | | | ifndef::openshift-rosa,openshift-rosa-hcp[] -|Google Compute Platform (GCP) persistent disk (PD)| ✅| |✅^[2]^ | ✅| -|GCP Filestore | ✅ | | | ✅| +|{gcp-full} persistent disk (PD)| ✅| |✅^[2]^ | ✅| +|{gcp-full} Filestore | ✅ | | | ✅| endif::openshift-rosa,openshift-rosa-hcp[] ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] |{ibm-power-server-name} Block | | | | ✅ | diff --git a/modules/persistent-storage-csi-gcp-filestore-nfs-export-options.adoc b/modules/persistent-storage-csi-gcp-filestore-nfs-export-options.adoc index 0cfd58d3a04e..015cee6f05ce 100644 --- a/modules/persistent-storage-csi-gcp-filestore-nfs-export-options.adoc +++ b/modules/persistent-storage-csi-gcp-filestore-nfs-export-options.adoc @@ -19,7 +19,7 @@ By default, a Filestore instance grants root level read/write access to all clie + [NOTE] ==== -For more information about creating a storage class, see Section _Creating a storage class for GCP Filestore Operator_. +For more information about creating a storage class, see Section _Creating a storage class for {gcp-short} Filestore Operator_. 
==== + .Example storage class YAML file with NFS export options diff --git a/modules/persistent-storage-csi-gcp-hyperdisk-limitations.adoc b/modules/persistent-storage-csi-gcp-hyperdisk-limitations.adoc index afe7ce80ed66..e5775cd862c7 100644 --- a/modules/persistent-storage-csi-gcp-hyperdisk-limitations.adoc +++ b/modules/persistent-storage-csi-gcp-hyperdisk-limitations.adoc @@ -5,7 +5,7 @@ :_mod-docs-content-type: CONCEPT [id="persistent-storage-csi-gcp-hyperdisk-limitations_{context}"] = C3 and N4 instance type limitations -The GCP PD CSI driver support for the C3 instance type for bare metal and N4 machine series have the following limitations: +The {gcp-short} PD CSI driver support for the C3 instance type for bare metal and N4 machine series has the following limitations: * You must set the volume size to at least 4Gi when you create hyperdisk-balanced disks. {product-title} does not round up to the minimum size, so you must specify the correct size yourself. @@ -24,7 +24,7 @@ For information about creating the storage class, see Step 2 in Section _Setting ifndef::openshift-dedicated[] * Clusters with mixed virtual machines (VMs) that use different storage types, for example, N2 and N4, are not supported. This is due to hyperdisks-balanced disks not being usable on most legacy VMs. Similarly, regular persistent disks are not usable on N4/C3 VMs. -* A GCP cluster with c3-standard-2, c3-standard-4, n4-standard-2, and n4-standard-4 nodes can erroneously exceed the maximum attachable disk number, which should be 16 (link:https://issues.redhat.com/browse/OCPBUGS-39258[JIRA link]). +* A {gcp-short} cluster with c3-standard-2, c3-standard-4, n4-standard-2, and n4-standard-4 nodes can erroneously exceed the maximum attachable disk number, which should be 16 (link:https://issues.redhat.com/browse/OCPBUGS-39258[JIRA link]). * link:https://cloud.google.com/compute/docs/disks/hyperdisks#limitations[Additional limitations]. 
endif::openshift-dedicated[] diff --git a/modules/persistent-storage-csi-gcp-hyperdisk-storage-pools-procedure.adoc b/modules/persistent-storage-csi-gcp-hyperdisk-storage-pools-procedure.adoc index 3f780c11b8f1..5c9a6f588b3e 100644 --- a/modules/persistent-storage-csi-gcp-hyperdisk-storage-pools-procedure.adoc +++ b/modules/persistent-storage-csi-gcp-hyperdisk-storage-pools-procedure.adoc @@ -36,7 +36,7 @@ allowVolumeExpansion: true volumeBindingMode: Immediate ---- <1> Specify the name for your storage class. In this example, the name is `hyperdisk-sc`. -<2> Specify the GCP CSI provisioner as `pd.csi.storage.gke.io`. +<2> Specify the {gcp-short} CSI provisioner as `pd.csi.storage.gke.io`. <3> If using storage pools, specify a list of specific storage pools that you want to use in the following format: `projects/PROJECT_ID/zones/ZONE/storagePools/STORAGE_POOL_NAME`. <4> Specify the disk type as `hyperdisk-balanced`. + @@ -48,7 +48,7 @@ If you use storage pools, you must first create a Hyperdisk Storage Pool of the endif::openshift-dedicated[] ifndef::openshift-dedicated[] -. Create a GCP cluster with attached disks provisioned with hyperdisk-balanced disks. +. Create a {gcp-short} cluster with attached disks provisioned with hyperdisk-balanced disks. endif::openshift-dedicated[] ifndef::openshift-dedicated[] @@ -56,7 +56,7 @@ ifndef::openshift-dedicated[] endif::openshift-dedicated[] ifndef::openshift-dedicated[] -.. Follow the procedure in the _Installing a cluster on GCP with customizations_ section. +.. Follow the procedure in the _Installing a cluster on {gcp-short} with customizations_ section. + For your install-config.yaml file, use the following example file: + @@ -154,7 +154,7 @@ allowedTopologies: <7> ... ---- <1> Specify the name for your storage class. In this example, it is `hyperdisk-sc`. -<2> `pd.csi.storage.gke.io` specifies GCP CSI provisioner. +<2> `pd.csi.storage.gke.io` specifies {gcp-short} CSI provisioner. 
<3> Specifies using hyperdisk-balanced disks. <4> Specifies the throughput value in MiBps using the "Mi" qualifier. For example, if your required throughput is 250 MiBps, specify "250Mi". If you do not specify a value, the capacity is based upon the disk type default. <5> Specifies the IOPS value without any qualifiers. For example, if you require 7,000 IOPS, specify "7000". If you do not specify a value, the capacity is based upon the disk type default. diff --git a/modules/persistent-storage-csi-gcp-pd-encrypted-pv.adoc b/modules/persistent-storage-csi-gcp-pd-encrypted-pv.adoc index cb35067ea158..ea3e9126d10b 100644 --- a/modules/persistent-storage-csi-gcp-pd-encrypted-pv.adoc +++ b/modules/persistent-storage-csi-gcp-pd-encrypted-pv.adoc @@ -6,7 +6,7 @@ [id="persistent-storage-csi-gcp-pd-encrypted-pv_{context}"] = Creating a custom-encrypted persistent volume -When you create a `PersistentVolumeClaim` object, {product-title} provisions a new persistent volume (PV) and creates a `PersistentVolume` object. You can add a custom encryption key in Google Cloud Platform (GCP) to protect a PV in your cluster by encrypting the newly created PV. +When you create a `PersistentVolumeClaim` object, {product-title} provisions a new persistent volume (PV) and creates a `PersistentVolume` object. You can add a custom encryption key in {gcp-first} to protect a PV in your cluster by encrypting the newly created PV. For encryption, the newly attached PV that you create uses customer-managed encryption keys (CMEK) on a cluster by using a new or existing Google Cloud Key Management Service (KMS) key. 
diff --git a/modules/persistent-storage-csi-gcp-pd-storage-class-ref.adoc b/modules/persistent-storage-csi-gcp-pd-storage-class-ref.adoc index 423ab3bac811..8726d836df49 100644 --- a/modules/persistent-storage-csi-gcp-pd-storage-class-ref.adoc +++ b/modules/persistent-storage-csi-gcp-pd-storage-class-ref.adoc @@ -3,11 +3,11 @@ // * storage/container_storage_interface/persistent-storage-csi-gcp-pd.adoc [id="persistent-storage-csi-gcp-pd-storage-class-ref_{context}"] -= GCP PD CSI driver storage class parameters += {gcp-short} PD CSI driver storage class parameters -The Google Cloud Platform (GCP) persistent disk (PD) Container Storage Interface (CSI) driver uses the CSI `external-provisioner` sidecar as a controller. This is a separate helper container that is deployed with the CSI driver. The sidecar manages persistent volumes (PVs) by triggering the `CreateVolume` operation. +The {gcp-first} persistent disk (PD) Container Storage Interface (CSI) driver uses the CSI `external-provisioner` sidecar as a controller. This is a separate helper container that is deployed with the CSI driver. The sidecar manages persistent volumes (PVs) by triggering the `CreateVolume` operation. -The GCP PD CSI driver uses the `csi.storage.k8s.io/fstype` parameter key to support dynamic provisioning. The following table describes all the GCP PD CSI storage class parameters that are supported by {product-title}. +The {gcp-short} PD CSI driver uses the `csi.storage.k8s.io/fstype` parameter key to support dynamic provisioning. The following table describes all the {gcp-short} PD CSI storage class parameters that are supported by {product-title}. 
.CreateVolume Parameters [cols="2,3,2,4",options="header"] diff --git a/modules/persistent-storage-csi-google-cloud-file-create-sc.adoc b/modules/persistent-storage-csi-google-cloud-file-create-sc.adoc index d5fda4484508..3b8c890fd466 100644 --- a/modules/persistent-storage-csi-google-cloud-file-create-sc.adoc +++ b/modules/persistent-storage-csi-google-cloud-file-create-sc.adoc @@ -4,9 +4,9 @@ :_mod-docs-content-type: PROCEDURE [id="persistent-storage-csi-google-cloud-file-create-sc_{context}"] -= Creating a storage class for GCP Filestore Storage += Creating a storage class for {gcp-short} Filestore Storage -After installing the Operator, you should create a storage class for dynamic provisioning of Google Compute Platform (GCP) Filestore volumes. +After installing the Operator, you should create a storage class for dynamic provisioning of {gcp-first} Filestore volumes. .Prerequisites * You are logged in to the running {product-title} cluster. @@ -31,7 +31,7 @@ allowVolumeExpansion: true volumeBindingMode: WaitForFirstConsumer -- <1> For a shared VPC, use the `connect-mode` parameter set to `PRIVATE_SERVICE_ACCESS`. For a non-shared VPC, the value is `DIRECT_PEERING`, which is the default setting. -<2> Specify the name of the GCP virtual private cloud (VPC) network where Filestore instances should be created in. +<2> Specify the name of the {gcp-short} virtual private cloud (VPC) network where Filestore instances should be created in. . Specify the name of the VPC network where Filestore instances should be created in. 
+ diff --git a/modules/persistent-storage-csi-google-cloud-file-delete-instances.adoc b/modules/persistent-storage-csi-google-cloud-file-delete-instances.adoc index 430b64d17416..a6753086bef2 100644 --- a/modules/persistent-storage-csi-google-cloud-file-delete-instances.adoc +++ b/modules/persistent-storage-csi-google-cloud-file-delete-instances.adoc @@ -4,14 +4,14 @@ :_mod-docs-content-type: PROCEDURE [id="persistent-storage-csi-google-cloud-file-delete-instances_{context}"] -= Destroying clusters and GCP Filestore += Destroying clusters and {gcp-short} Filestore -Typically, if you destroy a cluster, the {product-title} installer deletes all of the cloud resources that belong to that cluster. However, due to the special nature of the Google Compute Platform (GCP) Filestore resources, the automated cleanup process might not remove all of them in some rare cases. +Typically, if you destroy a cluster, the {product-title} installer deletes all of the cloud resources that belong to that cluster. However, due to the special nature of the {gcp-first} Filestore resources, the automated cleanup process might not remove all of them in some rare cases. Therefore, Red Hat recommends that you verify that all cluster-owned Filestore resources are deleted by the uninstall process. .Procedure -To ensure that all GCP Filestore PVCs have been deleted: +To ensure that all {gcp-short} Filestore PVCs have been deleted: . Access your Google Cloud account using the GUI or CLI. diff --git a/modules/persistent-storage-csi-manila-dynamic-provisioning.adoc b/modules/persistent-storage-csi-manila-dynamic-provisioning.adoc index 169749c32e26..5928b9633f54 100644 --- a/modules/persistent-storage-csi-manila-dynamic-provisioning.adoc +++ b/modules/persistent-storage-csi-manila-dynamic-provisioning.adoc @@ -10,7 +10,7 @@ The YAML files that are created are completely decoupled from Manila and from its Container Storage Interface (CSI) plugin. 
As an application developer, you can dynamically provision ReadWriteMany (RWX) storage and deploy pods with applications that safely consume the storage using YAML manifests. -You can use the same pod and persistent volume claim (PVC) definitions on-premise that you use with {product-title} on AWS, GCP, Azure, and other platforms, with the exception of the storage class reference in the PVC definition. +You can use the same pod and persistent volume claim (PVC) definitions on-premise that you use with {product-title} on AWS, {gcp-short}, Azure, and other platforms, with the exception of the storage class reference in the PVC definition. [IMPORTANT] ==== diff --git a/modules/persistent-storage-csi-migration-overview.adoc b/modules/persistent-storage-csi-migration-overview.adoc index 43cfde728668..65283627e8c7 100644 --- a/modules/persistent-storage-csi-migration-overview.adoc +++ b/modules/persistent-storage-csi-migration-overview.adoc @@ -15,7 +15,7 @@ The following in-tree to CSI drivers are automatically migrated: * Azure Disk * OpenStack Cinder * Amazon Web Services (AWS) Elastic Block Storage (EBS) -* Google Compute Engine Persistent Disk (GCP PD) +* Google Compute Engine Persistent Disk ({gcp-full} PD) * Azure File * VMware vSphere diff --git a/modules/policy-security-regulation-compliance.adoc b/modules/policy-security-regulation-compliance.adoc index 3d797eca66ec..a144dcc50776 100644 --- a/modules/policy-security-regulation-compliance.adoc +++ b/modules/policy-security-regulation-compliance.adoc @@ -27,7 +27,7 @@ Red Hat performs periodic vulnerability scanning of {product-title} using indust [id="firewall_{context}"] === Firewall and DDoS protection Each {product-title} cluster is protected by a secure network configuration at the cloud infrastructure level using firewall rules (AWS Security Groups or Google Cloud Compute Engine firewall rules). 
{product-title} customers on AWS are also protected against DDoS attacks with link:https://docs.aws.amazon.com/waf/latest/developerguide/ddos-overview.html[AWS Shield Standard]. -Similarly, all GCP load balancers and public IP addresses used by {product-title} on GCP are protected against DDoS attacks with link:https://cloud.google.com/armor/docs/managed-protection-overview[Google Cloud Armor Standard]. +Similarly, all {gcp-short} load balancers and public IP addresses used by {product-title} on {gcp-short} are protected against DDoS attacks with link:https://cloud.google.com/armor/docs/managed-protection-overview[Google Cloud Armor Standard]. [id="Component-traffic-flow-encryption_{context}"] === Component and traffic flow encryption @@ -62,7 +62,7 @@ Any issues that are discovered are prioritized based on severity. Any issues fou .Security and control certifications for {product-title} [cols= "3,3,3",options="header"] |=== -| Compliance | {product-title} on AWS | {product-title} on GCP +| Compliance | {product-title} on AWS | {product-title} on {gcp-full} | HIPAA Qualified | Yes (Only Customer Cloud Subscriptions) | Yes (Only Customer Cloud Subscriptions) diff --git a/modules/private-clusters-about-gcp.adoc b/modules/private-clusters-about-gcp.adoc index 5c1e27c1ef95..83adc4e67703 100644 --- a/modules/private-clusters-about-gcp.adoc +++ b/modules/private-clusters-about-gcp.adoc @@ -3,11 +3,11 @@ // * installing/installing_gcp/installing-gcp-private.adoc [id="private-clusters-about-gcp_{context}"] -= Private clusters in GCP += Private clusters in {gcp-full} -To create a private cluster on Google Cloud Platform (GCP), you must provide an existing private VPC and subnets to host the cluster. The installation program must also be able to resolve the DNS records that the cluster requires. The installation program configures the Ingress Operator and API server for only internal traffic. 
+To create a private cluster on {gcp-first}, you must provide an existing private VPC and subnets to host the cluster. The installation program must also be able to resolve the DNS records that the cluster requires. The installation program configures the Ingress Operator and API server for only internal traffic. -The cluster still requires access to internet to access the GCP APIs. +The cluster still requires access to the internet to access the {gcp-short} APIs. The following items are not required or created when you install a private cluster: @@ -32,7 +32,7 @@ The internal load balancer relies on instance groups rather than the target pool No health check for the Machine config server, `/healthz`, runs because of a difference in load balancer functionality. Two internal load balancers cannot share a single IP address, but two network load balancers can share a single external IP address. Instead, the health of an instance is determined entirely by the `/readyz` check on port 6443. //// -Is this also valid in GCP? +Is this also valid in {gcp-full}? The ability to add public functionality to a private cluster is limited. diff --git a/modules/private-clusters-about.adoc b/modules/private-clusters-about.adoc index 85e04de6eb2b..b289b4659fcc 100644 --- a/modules/private-clusters-about.adoc +++ b/modules/private-clusters-about.adoc @@ -34,6 +34,6 @@ By default, the installation program creates appropriate network load balancers On Amazon Web Services (AWS), separate public and private load balancers are created. The load balancers are identical except that an additional port is available on the internal one for use within the cluster. Although the installation program automatically creates or destroys the load balancer based on API server requirements, the cluster does not manage or maintain them. As long as you preserve the cluster's access to the API server, you can manually modify or move the load balancers. 
For the public load balancer, port 6443 is open and the health check is configured for HTTPS against the `/readyz` path. -On Google Cloud Platform, a single load balancer is created to manage both internal and external API traffic, so you do not need to modify the load balancer. +On {gcp-full}, a single load balancer is created to manage both internal and external API traffic, so you do not need to modify the load balancer. On Microsoft Azure, both public and private load balancers are created. However, because of limitations in current implementation, you just retain both load balancers in a private cluster. diff --git a/modules/private-service-connect-create.adoc b/modules/private-service-connect-create.adoc index 6cdb0f31826b..7db531d692f8 100644 --- a/modules/private-service-connect-create.adoc +++ b/modules/private-service-connect-create.adoc @@ -7,4 +7,4 @@ = Creating a private cluster with Private Service Connect Private Service Connect is supported with the Customer Cloud Subscription (CCS) infrastructure type only. To create an {product-title} on {GCP} using PSC, see - xref:../osd_gcp_clusters/creating-a-gcp-cluster.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp[Creating a cluster on GCP with CCS]. \ No newline at end of file + xref:../osd_gcp_clusters/creating-a-gcp-cluster.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp[Creating a cluster on {gcp-full} with CCS]. \ No newline at end of file diff --git a/modules/private-service-connect-prereqs.adoc b/modules/private-service-connect-prereqs.adoc index 96e7c0f17959..2fb4368b7bfc 100644 --- a/modules/private-service-connect-prereqs.adoc +++ b/modules/private-service-connect-prereqs.adoc @@ -31,7 +31,7 @@ For more information about the prerequisites that must be completed before deplo [NOTE] ==== -PSC is supported with the Customer Cloud Subscription (CCS) infrastructure type only. 
To create an {product-title} on {GCP} using PSC, see _Creating a cluster on GCP with Workload Identity Federation_. +PSC is supported with the Customer Cloud Subscription (CCS) infrastructure type only. To create an {product-title} on {GCP} using PSC, see _Creating a cluster on {gcp-short} with Workload Identity Federation_. ==== // [id="prereqs-wif-authentication_{context}"] diff --git a/modules/private-service-connect-psc-architecture.adoc b/modules/private-service-connect-psc-architecture.adoc index 4cac52ff288d..8ac7fa854d67 100644 --- a/modules/private-service-connect-psc-architecture.adoc +++ b/modules/private-service-connect-psc-architecture.adoc @@ -11,11 +11,11 @@ The PSC architecture includes producer services and consumer services. Using PSC The following image depicts how Red HAT SREs and other internal resources access and support clusters created using PSC. -* A unique PSC service attachment is created for each OSD cluster in the customer GCP project. The PSC service attachment points to the cluster API server load balancer created in the customer GCP project. +* A unique PSC service attachment is created for each OSD cluster in the customer {gcp-short} project. The PSC service attachment points to the cluster API server load balancer created in the customer {gcp-short} project. -* Similar to service attachments, a unique PSC endpoint is created in the Red Hat Management GCP project for each OSD cluster. +* Similar to service attachments, a unique PSC endpoint is created in the Red Hat Management {gcp-short} project for each OSD cluster. -* A dedicated subnet for GCP Private Service Connect is created in the cluster’s network within the customer GCP project. This is a special subnet type where the producer services are published via PSC service attachments. This subnet is used to Source NAT (SNAT) incoming requests to the cluster API server. 
Additionally, the PSC subnet must be within the Machine CIDR range and cannot be used in more than one service attachment. +* A dedicated subnet for {gcp-short} Private Service Connect is created in the cluster's network within the customer {gcp-short} project. This is a special subnet type where the producer services are published via PSC service attachments. This subnet is used to Source NAT (SNAT) incoming requests to the cluster API server. Additionally, the PSC subnet must be within the Machine CIDR range and cannot be used in more than one service attachment. * Red Hat internal resources and SREs access private OSD clusters using the connectivity between a PSC endpoint and service attachment. Even though the traffic transits multiple VPC networks, it remains entirely within Google Cloud. diff --git a/modules/registry-configuring-storage-gcp-user-infra.adoc b/modules/registry-configuring-storage-gcp-user-infra.adoc index 6727655d4f3a..1d98fad86b95 100644 --- a/modules/registry-configuring-storage-gcp-user-infra.adoc +++ b/modules/registry-configuring-storage-gcp-user-infra.adoc @@ -4,17 +4,17 @@ :_mod-docs-content-type: PROCEDURE [id="registry-configuring-storage-gcp-user-infra_{context}"] -= Configuring the registry storage for GCP with user-provisioned infrastructure += Configuring the registry storage for {gcp-short} with user-provisioned infrastructure -If the Registry Operator cannot create a Google Cloud Platform (GCP) bucket, you must set up the storage medium manually and configure the settings in the registry custom resource (CR). +If the Registry Operator cannot create a {gcp-first} bucket, you must set up the storage medium manually and configure the settings in the registry custom resource (CR). .Prerequisites -* A cluster on GCP with user-provisioned infrastructure. -* To configure registry storage for GCP, you need to provide Registry Operator +* A cluster on {gcp-full} with user-provisioned infrastructure. 
+* To configure registry storage for {gcp-full}, you need to provide Registry Operator cloud credentials. -* For GCS on GCP storage, the secret is expected to contain one key whose value is the -contents of a credentials file provided by GCP: +* For GCS on {gcp-full} storage, the secret is expected to contain one key whose value is the +contents of a credentials file provided by {gcp-full}: ** `REGISTRY_STORAGE_GCS_KEYFILE` .Procedure diff --git a/modules/registry-operator-config-resources-secret-gcp.adoc b/modules/registry-operator-config-resources-secret-gcp.adoc index 08a0f7f586f3..0dc1c7c241e3 100644 --- a/modules/registry-operator-config-resources-secret-gcp.adoc +++ b/modules/registry-operator-config-resources-secret-gcp.adoc @@ -14,8 +14,8 @@ The `image-registry-private-configuration-user` secret provides credentials needed for storage access and management. It overrides the default credentials used by the Operator, if default credentials were found. -For GCS on GCP storage, the secret is expected to contain one key whose value is the -contents of a credentials file provided by GCP: +For GCS on {gcp-full} storage, the secret is expected to contain one key whose value is the +contents of a credentials file provided by {gcp-full}: * `REGISTRY_STORAGE_GCS_KEYFILE` diff --git a/modules/registry-operator-configuration-resource-overview-gcp-gcs.adoc b/modules/registry-operator-configuration-resource-overview-gcp-gcs.adoc index 2af302b20c8e..a267ca0f7c4c 100644 --- a/modules/registry-operator-configuration-resource-overview-gcp-gcs.adoc +++ b/modules/registry-operator-configuration-resource-overview-gcp-gcs.adoc @@ -3,9 +3,9 @@ // * openshift_images/configuring-registry-operator.adoc [id="registry-operator-configuration-resource-overview-gcp-gcs_{context}"] -= Image Registry Operator configuration parameters for GCP GCS += Image Registry Operator configuration parameters for {gcp-short} GCS -The following configuration parameters are available for GCP GCS registry 
storage. +The following configuration parameters are available for {gcp-short} GCS registry storage. [cols="3a,8a",options="header"] |=== @@ -20,12 +20,12 @@ It is optional and is generated if not provided. set based on the installed GCS Region. |`projectID` -|ProjectID is the Project ID of the GCP project that this bucket should be +|ProjectID is the Project ID of the {gcp-short} project that this bucket should be associated with. It is optional. |`keyID` |KeyID is the KMS key ID to use for encryption. It is optional because -buckets are encrypted by default on GCP. This allows for the use of a custom +buckets are encrypted by default on {gcp-short}. This allows for the use of a custom encryption key. |=== diff --git a/modules/sd-vs-ocp.adoc b/modules/sd-vs-ocp.adoc index 47cb1b01ff3a..d2e49a3d1cac 100644 --- a/modules/sd-vs-ocp.adoc +++ b/modules/sd-vs-ocp.adoc @@ -27,7 +27,7 @@ endif::openshift-rosa,openshift-rosa-hcp[] |Customers can choose their computing resources. | ifdef::openshift-dedicated[] -{product-title} is hosted and managed in a public cloud (Amazon Web Services or Google Cloud Platform) either owned by Red{nbsp}Hat or provided by the customer. +{product-title} is hosted and managed in a public cloud (Amazon Web Services or {gcp-full}) either owned by Red{nbsp}Hat or provided by the customer. endif::openshift-dedicated[] ifdef::openshift-rosa[] {product-title} is hosted and managed in a public cloud (Amazon Web Services) provided by the customer. 
diff --git a/modules/sdpolicy-am-cloud-providers.adoc b/modules/sdpolicy-am-cloud-providers.adoc index d5371681add6..f9cac63c9dfd 100644 --- a/modules/sdpolicy-am-cloud-providers.adoc +++ b/modules/sdpolicy-am-cloud-providers.adoc @@ -8,4 +8,4 @@ {product-title} offers OpenShift Container Platform clusters as a managed service on the following cloud providers: * Amazon Web Services (AWS) -* Google Cloud Platform (GCP) \ No newline at end of file +* {gcp-full} \ No newline at end of file diff --git a/modules/sdpolicy-logging.adoc b/modules/sdpolicy-logging.adoc index e4a660c03f84..19dbf0f74a8c 100644 --- a/modules/sdpolicy-logging.adoc +++ b/modules/sdpolicy-logging.adoc @@ -4,14 +4,14 @@ [id="sdpolicy-logging_{context}"] = Logging -{product-title} provides optional integrated log forwarding to Amazon CloudWatch (on AWS) or Google Cloud Logging (on GCP). +{product-title} provides optional integrated log forwarding to Amazon CloudWatch (on AWS) or Google Cloud Logging (on {gcp-short}). For more information, see link:https://docs.openshift.com/dedicated/observability/logging/log_collection_forwarding/log-forwarding.html[About log collection and forwarding]. [id="audit-logging_{context}"] == Cluster audit logging -Cluster audit logs are available through Amazon CloudWatch (on AWS) or Google Cloud Logging (on GCP), if the integration is enabled. If the integration is not enabled, you can request the audit logs by opening a support case. Audit log requests must specify a date and time range not to exceed 21 days. When requesting audit logs, customers should be aware that audit logs are many GB per day in size. +Cluster audit logs are available through Amazon CloudWatch (on AWS) or Google Cloud Logging (on {gcp-short}), if the integration is enabled. If the integration is not enabled, you can request the audit logs by opening a support case. Audit log requests must specify a date and time range not to exceed 21 days. 
When requesting audit logs, customers should be aware that audit logs are many GB per day in size. [id="application-logging_{context}"] == Application logging -Application logs sent to `STDOUT` are forwarded to Amazon CloudWatch (on AWS) or Google Cloud Logging (on GCP) through the cluster logging stack, if it is installed. +Application logs sent to `STDOUT` are forwarded to Amazon CloudWatch (on AWS) or Google Cloud Logging (on {gcp-short}) through the cluster logging stack, if it is installed. diff --git a/modules/sdpolicy-security.adoc b/modules/sdpolicy-security.adoc index 3877c869dd12..6091faf02ca6 100644 --- a/modules/sdpolicy-security.adoc +++ b/modules/sdpolicy-security.adoc @@ -60,7 +60,7 @@ $ oc adm policy add-cluster-role-to-group self-provisioner system:authenticated: .Security and control certifications for {product-title} [cols= "3,3,3",options="header"] |=== -| Compliance | {product-title} on AWS | {product-title} on GCP +| Compliance | {product-title} on AWS | {product-title} on {gcp-full} | HIPAA Qualified | Yes (Only Customer Cloud Subscriptions) | Yes (Only Customer Cloud Subscriptions) @@ -78,7 +78,7 @@ $ oc adm policy add-cluster-role-to-group self-provisioner system:authenticated: [id="network-security_{context}"] == Network security Each {product-title} cluster is protected by a secure network configuration at the cloud infrastructure level using firewall rules (AWS Security Groups or Google Cloud Compute Engine firewall rules). {product-title} customers on AWS are also protected against DDoS attacks with link:https://docs.aws.amazon.com/waf/latest/developerguide/ddos-overview.html[AWS Shield Standard]. -Similarly, all GCP load balancers and public IP addresses used by {product-title} on GCP are protected against DDoS attacks with link:https://cloud.google.com/armor/docs/managed-protection-overview[Google Cloud Armor Standard]. 
+Similarly, all {gcp-short} load balancers and public IP addresses used by {product-title} on {gcp-short} are protected against DDoS attacks with link:https://cloud.google.com/armor/docs/managed-protection-overview[Google Cloud Armor Standard]. [id="etcd-encryption_{context}"] == etcd encryption diff --git a/modules/sdpolicy-storage.adoc b/modules/sdpolicy-storage.adoc index 0f8d620ca13b..3a4f0ac44f2c 100644 --- a/modules/sdpolicy-storage.adoc +++ b/modules/sdpolicy-storage.adoc @@ -20,7 +20,7 @@ Persistent volumes (PVs) are backed by AWS EBS and Google Cloud persistent disk PVs can only be attached to a single node at a time and are specific to the availability zone in which they were provisioned, but they can be attached to any node in the availability zone. -Each cloud provider has its own limits for how many PVs can be attached to a single node. See link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#instance-type-volume-limits[AWS instance type limits] or link:https://cloud.google.com/compute/docs/machine-types#custom_machine_types[Google Cloud Platform custom machine types ] for details. +Each cloud provider has its own limits for how many PVs can be attached to a single node. See link:https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#instance-type-volume-limits[AWS instance type limits] or link:https://cloud.google.com/compute/docs/machine-types#custom_machine_types[{gcp-full} custom machine types ] for details. [id="shared-storage_{context}"] == Shared storage (RWX) diff --git a/modules/service-account-auth-overview.adoc b/modules/service-account-auth-overview.adoc index 70ebb27fb503..1f43b57dceb7 100644 --- a/modules/service-account-auth-overview.adoc +++ b/modules/service-account-auth-overview.adoc @@ -13,5 +13,5 @@ Service account keys are a security risk if not managed carefully. 
Users should [IMPORTANT] ===== -Because of the potential security risk when using the Service Account authentication type, Red Hat recommends using GCP Workload Identity Federation (WIF) as the authentication type for installing and interacting with the OpenShift Dedicated cluster deployed on Google Cloud Platform (GCP) because it provides enhanced security. For more information, see _Creating a cluster on GCP with Workload Identity Federation authentication_ in the _Additional resources_ section. +Because of the potential security risk when using the Service Account authentication type, Red Hat recommends using {gcp-first} Workload Identity Federation (WIF) as the authentication type for installing and interacting with the OpenShift Dedicated cluster deployed on {gcp-short} because it provides enhanced security. For more information, see _Creating a cluster on {gcp-short} with Workload Identity Federation authentication_ in the _Additional resources_ section. ===== \ No newline at end of file diff --git a/modules/sre-cluster-access.adoc b/modules/sre-cluster-access.adoc index d74cb3ca7295..f85fd2a8b091 100644 --- a/modules/sre-cluster-access.adoc +++ b/modules/sre-cluster-access.adoc @@ -29,7 +29,7 @@ The information presented below is an overview of the process an SRE must perfor *** Accessing a PrivateLink cluster: Request is sent to the Red{nbsp}Hat Transit Gateway, which then connects to a Red{nbsp}Hat VPC per region. The VPC that receives the request will be dependent on the target private cluster's region. Within the VPC, there is a private subnet that contains the PrivateLink endpoint to the customer's PrivateLink cluster. ifdef::openshift-dedicated[] -*** Accessing a Private Service Connect (PSC) cluster: Request is sent to Red{nbsp}Hat's internal backend infrastructure, which routes the traffic through a secured, trusted network to Red{nbsp}Hat's Management project in GCP. 
The Red{nbsp}Hat Management project includes VPC, which is configured with subnets in multiple regions, each containing a PSC endpoint that provides private access to the customer's cluster in the respective region. The traffic is routed through the appropriate regional subnet, ensuring secure and private access to the cluster without traversing the public internet. +*** Accessing a Private Service Connect (PSC) cluster: Request is sent to Red{nbsp}Hat's internal backend infrastructure, which routes the traffic through a secured, trusted network to Red{nbsp}Hat's Management project in {gcp-short}. The Red{nbsp}Hat Management project includes VPC, which is configured with subnets in multiple regions, each containing a PSC endpoint that provides private access to the customer's cluster in the respective region. The traffic is routed through the appropriate regional subnet, ensuring secure and private access to the cluster without traversing the public internet. endif::openshift-dedicated[] ifdef::openshift-rosa[] diff --git a/modules/storage-persistent-storage-block-volume.adoc b/modules/storage-persistent-storage-block-volume.adoc index 16ce342b5867..12c5687f0702 100644 --- a/modules/storage-persistent-storage-block-volume.adoc +++ b/modules/storage-persistent-storage-block-volume.adoc @@ -36,7 +36,7 @@ ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] |Fibre Channel | ✅ | | ✅ endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] ifndef::openshift-rosa,openshift-rosa-hcp[] -|GCP | ✅ | ✅ | ✅ +|{gcp-full} | ✅ | ✅ | ✅ endif::openshift-rosa,openshift-rosa-hcp[] ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] |HostPath | | | diff --git a/modules/storage-persistent-storage-pv.adoc b/modules/storage-persistent-storage-pv.adoc index 291c6ee1dbb7..724bc897a12d 100644 --- a/modules/storage-persistent-storage-pv.adoc +++ b/modules/storage-persistent-storage-pv.adoc @@ -64,8 +64,8 @@ 
ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] - Fibre Channel endif::openshift-enterprise,openshift-webscale,openshift-origin[] ifndef::openshift-rosa,openshift-rosa-hcp[] -- GCP Persistent Disk -- GCP Filestore +- {gcp-full} Persistent Disk +- {gcp-full} Filestore endif::openshift-rosa,openshift-rosa-hcp[] ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] - {ibm-power-server-title} Block @@ -159,8 +159,8 @@ ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] |Fibre Channel | ✅ | ✅ |✅ | ✅ ^[3]^ endif::[] ifndef::openshift-rosa,openshift-rosa-hcp[] -|GCP Persistent Disk | ✅ ^[4]^ |✅ | ✅ | ✅ ^[4]^ -|GCP Filestore | ✅ | ✅ |✅ | ✅ +|{gcp-full} Persistent Disk | ✅ ^[4]^ |✅ | ✅ | ✅ ^[4]^ +|{gcp-full} Filestore | ✅ | ✅ |✅ | ✅ endif::openshift-rosa,openshift-rosa-hcp[] ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] //|GlusterFS | ✅ |✅ | ✅ | ✅ @@ -186,7 +186,7 @@ endif::[] 3. Only raw block volumes support the `ReadWriteMany` (RWX) access mode for Fibre Channel and iSCSI. For more information, see "Block volume support". -4. For GCP hyperdisk-balanced disks: +4. For {gcp-short} hyperdisk-balanced disks: + * The supported access modes are: ** `ReadWriteOnce` diff --git a/modules/storage-persistent-storage-pvc-volumeattributesclass.adoc b/modules/storage-persistent-storage-pvc-volumeattributesclass.adoc index f18605116630..135e9fc47a3d 100644 --- a/modules/storage-persistent-storage-pvc-volumeattributesclass.adoc +++ b/modules/storage-persistent-storage-pvc-volumeattributesclass.adoc @@ -12,7 +12,7 @@ Volume Attributes Classes provide a way for administrators to describe "classes" :FeatureName: Volume Attributes Classes include::snippets/technology-preview.adoc[leveloffset=+1] -Volume Attributes Classes in {product-title} is available only with AWS Elastic Block Storage (EBS) and Google Cloud Platform (GCP) persistent disk (PD) Container Storage Interface (CSI). 
+Volume Attributes Classes in {product-title} is available only with AWS Elastic Block Storage (EBS) and {gcp-first} persistent disk (PD) Container Storage Interface (CSI). You can apply a Volume Attributes Class to a persistent volume claim (PVC). If a new Volume Attributes Class becomes available in the cluster, a user can update the PVC with the new Volume Attributes Class if needed. @@ -21,7 +21,7 @@ Volume Attributes Classes have parameters that describe volumes belonging to the == Limitations Volume Attributes Classes has the following limitations: -* With GCP PD, volume modification using Volume Attributes Classes is only possible for hyperdisk-balanced disk types. +* With {gcp-short} PD, volume modification using Volume Attributes Classes is only possible for hyperdisk-balanced disk types. * No more than 512 parameters can be defined for a `VolumeAttributesClass`. diff --git a/modules/storage-persistent-storage-selinuxChangePolicy.adoc b/modules/storage-persistent-storage-selinuxChangePolicy.adoc index 409fc30cc375..0b10c04c31bd 100644 --- a/modules/storage-persistent-storage-selinuxChangePolicy.adoc +++ b/modules/storage-persistent-storage-selinuxChangePolicy.adoc @@ -11,15 +11,15 @@ SELinux (Security-Enhanced Linux) is a security mechanism that assigns security When a pod starts, the container runtime recursively relabels all files on a volume to match the pod's SELinux context. For volumes with many files, this can significantly increase pod startup times. -Mount option specifies avoiding recursive relabeling of all files by attempting to mount the volume with the correct SELinux label directly using the -o context mount option, thus helping to avoid pod timeout problems. +Mount option specifies avoiding recursive relabeling of all files by attempting to mount the volume with the correct SELinux label directly using the -o context mount option, thus helping to avoid pod timeout problems. 
.RWOP and SELinux mount option -ReadWriteOncePod (RWOP) persistent volumes use the SELinux mount feature by default. +ReadWriteOncePod (RWOP) persistent volumes use the SELinux mount feature by default. The mount option feature is driver dependent, and enabled by default in AWS EBS ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] -, Azure Disk, GCP PD, {ibm-cloud-title} Block Storage volume, Cinder, vSphere, +, Azure Disk, {gcp-short} PD, {ibm-cloud-title} Block Storage volume, Cinder, vSphere, endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] and {rh-storage-first}. For third-party drivers, contact your storage vendor. diff --git a/modules/understanding-clusters.adoc b/modules/understanding-clusters.adoc index 648f0d8a6ca2..9273e5072463 100644 --- a/modules/understanding-clusters.adoc +++ b/modules/understanding-clusters.adoc @@ -6,16 +6,16 @@ [id="overview-of-osd-cloud-deployment-options_{context}"] = Overview of the {product-title} cloud deployment options -{product-title} offers {OCP} clusters as a managed service on {AWS} or {GCP}. +{product-title} offers {OCP} clusters as a managed service on {AWS} or {gcp-full}. -Through the Customer Cloud Subscription (CCS) model, you can deploy clusters in an existing AWS or GCP cloud account that you own. +Through the Customer Cloud Subscription (CCS) model, you can deploy clusters in an existing AWS or {gcp-short} cloud account that you own. Alternatively, you can install {product-title} in a cloud account that is owned by Red Hat. [id="osd-deployment-option-ccs_{context}"] == Deploying clusters using the Customer Cloud Subscription (CCS) model -The Customer Cloud Subscription (CCS) model enables you to deploy Red Hat managed {product-title} clusters in an existing {AWS} or {GCP} account that you own. Red Hat requires several prerequisites be met in order to provide this service, and this service is supported by Red Hat Site Reliability Engineers (SRE). 
+The Customer Cloud Subscription (CCS) model enables you to deploy Red Hat managed {product-title} clusters in an existing {AWS} or {gcp-full} account that you own. Red Hat requires several prerequisites be met in order to provide this service, and this service is supported by Red Hat Site Reliability Engineers (SRE). In the CCS model, the customer pays the cloud infrastructure provider directly for cloud costs, and the cloud infrastructure account is part of an organization owned by the customer, with specific access granted to Red Hat. In this model, the customer pays Red Hat for the CCS subscription and pays the cloud provider for the cloud costs. @@ -24,4 +24,4 @@ By using the CCS model, you can use the services that are provided by your cloud [id="osd-deployment-option-red-hat-cloud-account_{context}"] == Deploying clusters in Red Hat cloud accounts -As an alternative to the CCS model, you can deploy {product-title} clusters in AWS or GCP cloud accounts that are owned by Red Hat. With this model, Red Hat is responsible for the cloud account and the cloud infrastructure costs are paid directly by Red Hat. The customer only pays the Red Hat subscription costs. +As an alternative to the CCS model, you can deploy {product-title} clusters in AWS or {gcp-short} cloud accounts that are owned by Red Hat. With this model, Red Hat is responsible for the cloud account and the cloud infrastructure costs are paid directly by Red Hat. The customer only pays the Red Hat subscription costs. diff --git a/modules/upgrade-manual.adoc b/modules/upgrade-manual.adoc index 45c6e1f88239..53b99cce49e2 100644 --- a/modules/upgrade-manual.adoc +++ b/modules/upgrade-manual.adoc @@ -29,7 +29,7 @@ You can use {cluster-manager} to manually upgrade your {product-title} cluster o + [IMPORTANT] ==== -Before upgrading a Workload Identity Federation (WIF)-enabled {product-title} on {GCP} cluster, you must update the wif-config. 
For more information, see "Cluster upgrades with Workload Identity Federation (WIF)". +Before upgrading a Workload Identity Federation (WIF)-enabled {product-title} on {gcp-full} cluster, you must update the wif-config. For more information, see "Cluster upgrades with Workload Identity Federation (WIF)". ==== + . Click *Next*. diff --git a/modules/upgrade.adoc b/modules/upgrade.adoc index 3b84101e42ca..8dde1fa77485 100644 --- a/modules/upgrade.adoc +++ b/modules/upgrade.adoc @@ -11,7 +11,7 @@ When upgrades are made available for your {product-title} cluster, you can upgra [IMPORTANT] ==== -Before upgrading a Workload Identity Federation (WIF)-enabled {product-title} on {GCP} cluster, you must update the wif-config. For more information, see "Cluster upgrades with Workload Identity Federation (WIF)". +Before upgrading a Workload Identity Federation (WIF)-enabled {product-title} on {gcp-full} cluster, you must update the wif-config. For more information, see "Cluster upgrades with Workload Identity Federation (WIF)". ==== Red Hat Site Reliability Engineers (SRE) will provide a curated list of available versions for your {product-title} clusters. For each cluster you will be able to review the full list of available releases, as well as the corresponding release notes. {cluster-manager} will enable installation of clusters at the latest supported versions, and upgrades can be canceled at any time. @@ -70,7 +70,7 @@ For recurring upgrades, you will also receive email notifications before the upg [id="wif-upgrades_{context}"] == Cluster upgrades with Workload Identity Federation (WIF) -Before upgrading an {product-title} on {GCP} cluster with WIF authentication type to a newer y-stream version, you must update the WIF configuration to that version as well. Failure to do so before attempting to upgrade the cluster version will result in an error. 
+Before upgrading an {product-title} on {gcp-full} cluster with WIF authentication type to a newer y-stream version, you must update the WIF configuration to that version as well. Failure to do so before attempting to upgrade the cluster version will result in an error. For more information on how to update a WIF configuration, see the _Additional resources_ section. [NOTE] diff --git a/modules/wif-overview.adoc b/modules/wif-overview.adoc index ca99871be40c..6009c7c53295 100644 --- a/modules/wif-overview.adoc +++ b/modules/wif-overview.adoc @@ -7,13 +7,13 @@ [id="workload-identity-federation-overview_{context}"] = Workload Identity Federation overview -Workload Identity Federation (WIF) is a {GCP} Identity and Access Management (IAM) feature that provides third parties a secure method to access resources on a customer's cloud account. WIF eliminates the need for service account keys, and is Google Cloud's preferred method of credential authentication. +Workload Identity Federation (WIF) is a {gcp-full} Identity and Access Management (IAM) feature that provides third parties a secure method to access resources on a customer's cloud account. WIF eliminates the need for service account keys, and is Google Cloud's preferred method of credential authentication. While service account keys can provide powerful access to your Google Cloud resources, they must be maintained by the end user and can be a security risk if they are not managed properly. WIF does not use service keys as an access method for your Google cloud resources. Instead, WIF grants access by using credentials from external identity providers to generate short-lived credentials for workloads. The workloads can then use these credentials to temporarily impersonate service accounts and access Google Cloud resources. This removes the burden of having to properly maintain service account keys, and removes the risk of unauthorized users gaining access to service account keys. 
The following bulleted items provides a basic overview of the Workload Identity Federation process: -* The owner of the {GCP} project configures a workload identity pool with an identity provider, allowing {product-title} to access the project's associated service accounts using short-lived credentials. +* The owner of the {gcp-full} project configures a workload identity pool with an identity provider, allowing {product-title} to access the project's associated service accounts using short-lived credentials. * This workload identity pool is configured to authenticate requests using an Identity Provider (IP) that the user defines. * For applications to get access to cloud resources, they first pass credentials to Google's Security Token Service (STS). STS uses the specified identity provider to verify the credentials. * Once the credentials are verified, STS returns a temporary access token to the caller, giving the application the ability to impersonate the service account bound to that identity. @@ -24,7 +24,7 @@ Operators also need access to cloud resources. By using WIF instead of service a // * The external application calls Google Security Token Service to exchange the account credentials for a short-lived Google Cloud access token. // * The token can then be used to impersonate a service account and access Google Cloud resources. -For more information about Workload Identity Federation, see the link:https://cloud.google.com/iam/docs/workload-identity-federation[Google Cloud Platform documentation]. +For more information about Workload Identity Federation, see the link:https://cloud.google.com/iam/docs/workload-identity-federation[{gcp-full} documentation]. 
[IMPORTANT] ==== diff --git a/modules/windows-machineset-gcp.adoc b/modules/windows-machineset-gcp.adoc index b97917ea0299..c7a1427b52d9 100644 --- a/modules/windows-machineset-gcp.adoc +++ b/modules/windows-machineset-gcp.adoc @@ -3,9 +3,9 @@ // * windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc [id="windows-machineset-gcp_{context}"] -= Sample YAML for a Windows MachineSet object on GCP += Sample YAML for a Windows MachineSet object on {gcp-full} -This sample YAML file defines a Windows `MachineSet` object running on Google Cloud Platform (GCP) that the Windows Machine Config Operator (WMCO) can use. +This sample YAML file defines a Windows `MachineSet` object running on {gcp-first} that the Windows Machine Config Operator (WMCO) can use. [source,yaml] ---- @@ -74,7 +74,7 @@ $ oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster <3> Configure the machine set as a Windows machine. <4> Configure the Windows node as a compute machine. <5> Specify the full path to an image of a supported version of Windows Server. -<6> Specify the GCP project that this cluster was created in. -<7> Specify the GCP region, such as `us-central1`. +<6> Specify the {gcp-short} project that this cluster was created in. +<7> Specify the {gcp-full} region, such as `us-central1`. <8> Created by the WMCO when it configures the first Windows machine. After that, the `windows-user-data` is available for all subsequent machine sets to consume. <9> Specify the zone within the chosen region, such as `us-central1-a`. 
diff --git a/networking/networking_operators/external_dns_operator/nw-creating-dns-records-on-gcp.adoc b/networking/networking_operators/external_dns_operator/nw-creating-dns-records-on-gcp.adoc index 02ac0ce42216..a0425332203c 100644 --- a/networking/networking_operators/external_dns_operator/nw-creating-dns-records-on-gcp.adoc +++ b/networking/networking_operators/external_dns_operator/nw-creating-dns-records-on-gcp.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY -[id="creating-dns-records-on-gcp"] -= Creating DNS records on GCP include::_attributes/common-attributes.adoc[] +[id="creating-dns-records-on-gcp"] += Creating DNS records on {gcp-full} :context: creating-dns-records-on-gcp toc::[] diff --git a/networking/networking_operators/external_dns_operator/nw-installing-external-dns-operator-on-cloud-providers.adoc b/networking/networking_operators/external_dns_operator/nw-installing-external-dns-operator-on-cloud-providers.adoc index 6655fed55c25..871e2698b7a1 100644 --- a/networking/networking_operators/external_dns_operator/nw-installing-external-dns-operator-on-cloud-providers.adoc +++ b/networking/networking_operators/external_dns_operator/nw-installing-external-dns-operator-on-cloud-providers.adoc @@ -6,7 +6,7 @@ include::_attributes/common-attributes.adoc[] toc::[] -You can install the External DNS Operator on cloud providers such as AWS, Azure, and GCP. +You can install the External DNS Operator on cloud providers such as AWS, Azure, and {gcp-full}. 
// Installing the External DNS Operator with OperatorHub include::modules/nw-installing-external-dns-operator.adoc[leveloffset=+1] diff --git a/networking/ovn_kubernetes_network_provider/configuring-cluster-wide-proxy.adoc b/networking/ovn_kubernetes_network_provider/configuring-cluster-wide-proxy.adoc index d9670f8654a1..20a22e878c99 100644 --- a/networking/ovn_kubernetes_network_provider/configuring-cluster-wide-proxy.adoc +++ b/networking/ovn_kubernetes_network_provider/configuring-cluster-wide-proxy.adoc @@ -43,7 +43,7 @@ ifdef::openshift-dedicated[] // * For the installation prerequisites for ROSA clusters that use the AWS Security Token Service (STS), see xref:../../rosa_planning/rosa-sts-aws-prereqs.adoc#rosa-sts-aws-prerequisites[AWS prerequisites for ROSA with STS]. //* For the installation prerequisites for ROSA clusters that do not use STS, see xref:../../rosa_install_access_delete_clusters/rosa_getting_started_iam/rosa-aws-prereqs.adoc#prerequisites[AWS prerequisites for ROSA]. //endif::openshift-rosa[] -* For the installation prerequisites for {product-title} clusters that use the Customer Cloud Subscription (CCS) model, see xref:../../osd_planning/aws-ccs.adoc#aws-ccs[Customer Cloud Subscriptions on AWS] or xref:../../osd_planning/gcp-ccs.adoc#gcp-ccs[Customer Cloud Subscriptions on GCP]. +* For the installation prerequisites for {product-title} clusters that use the Customer Cloud Subscription (CCS) model, see xref:../../osd_planning/aws-ccs.adoc#aws-ccs[Customer Cloud Subscriptions on AWS] or xref:../../osd_planning/gcp-ccs.adoc#gcp-ccs[Customer Cloud Subscriptions on {gcp-full}]. 
endif::openshift-dedicated[] include::modules/configuring-a-proxy-trust-bundle-responsibilities.adoc[leveloffset=+1] @@ -79,7 +79,7 @@ ifdef::openshift-rosa[] endif::openshift-rosa[] ifdef::openshift-dedicated[] * xref:../../osd_aws_clusters/creating-an-aws-cluster.adoc#osd-create-aws-cluster-ccs_osd-creating-a-cluster-on-aws[Creating a cluster on AWS] -* xref:../../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on GCP with Workload Identity Federation authentication] +* xref:../../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on {gcp-full} with Workload Identity Federation authentication] endif::openshift-dedicated[] endif::openshift-rosa,openshift-dedicated[] diff --git a/nodes/pods/nodes-pods-short-term-auth.adoc b/nodes/pods/nodes-pods-short-term-auth.adoc index 8c4af9eb8fac..a8bb18b40bd5 100644 --- a/nodes/pods/nodes-pods-short-term-auth.adoc +++ b/nodes/pods/nodes-pods-short-term-auth.adoc @@ -31,7 +31,7 @@ To configure this authentication method, you must meet the following requirement * In your cloud provider console, you must have access as a user with privileges to manage Identity and Access Management (IAM) and federated identity configurations. -// Section to add with AWS and Azure content. Only documenting GCP at this time. +// Section to add with AWS and Azure content. Only documenting {gcp-full} at this time. 
//// [id="nodes-pods-short-term-auth-compatibility_{context}"] === Compatibility limitations diff --git a/observability/logging/log_collection_forwarding/logging-output-types.adoc b/observability/logging/log_collection_forwarding/logging-output-types.adoc index 12caf9f6f02c..8180f67248c9 100644 --- a/observability/logging/log_collection_forwarding/logging-output-types.adoc +++ b/observability/logging/log_collection_forwarding/logging-output-types.adoc @@ -30,4 +30,4 @@ The `fluentdForward` output is only supported if you are using the Fluentd colle ==== `syslog`:: An external log aggregation solution that supports the syslog link:https://tools.ietf.org/html/rfc3164[RFC3164] or link:https://tools.ietf.org/html/rfc5424[RFC5424] protocols. The `syslog` output can use a UDP, TCP, or TLS connection. `cloudwatch`:: Amazon CloudWatch, a monitoring and log storage service hosted by Amazon Web Services (AWS). -`cloudlogging`:: Google Cloud Logging, a monitoring and log storage service hosted by Google Cloud Platform (GCP). +`cloudlogging`:: Google Cloud Logging, a monitoring and log storage service hosted by {gcp-full}. 
diff --git a/osd_gcp_clusters/creating-a-gcp-cluster-redhat-account.adoc b/osd_gcp_clusters/creating-a-gcp-cluster-redhat-account.adoc index 34b78991ebcf..3127dc96ccc2 100644 --- a/osd_gcp_clusters/creating-a-gcp-cluster-redhat-account.adoc +++ b/osd_gcp_clusters/creating-a-gcp-cluster-redhat-account.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY -[id="osd-creating-a-gcp-cluster-rh-account"] -= Creating a cluster on GCP with a Red Hat cloud account include::_attributes/attributes-openshift-dedicated.adoc[] +[id="osd-creating-a-gcp-cluster-rh-account"] += Creating a cluster on {gcp-full} with a Red Hat cloud account :context: osd-creating-a-gcp-cluster-rh-account toc::[] diff --git a/osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc b/osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc index 05356dce8b83..eb9bdaf411e6 100644 --- a/osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc +++ b/osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY -[id="osd-creating-a-cluster-on-gcp-sa"] -= Creating a cluster on GCP with Service Account authentication include::_attributes/attributes-openshift-dedicated.adoc[] +[id="osd-creating-a-cluster-on-gcp-sa"] += Creating a cluster on {gcp-full} with Service Account authentication :context: osd-creating-a-cluster-on-gcp-sa toc::[] @@ -26,7 +26,7 @@ include::modules/osd-create-cluster-ccs.adoc[leveloffset=+1] [id="additional-resources_{context}"] == Additional resources -* For information about Workload Identity Federation, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on GCP with Workload Identity Federation authentication]. 
+* For information about Workload Identity Federation, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on {gcp-full} with Workload Identity Federation authentication]. * For information about Private Service Connect (PSC), see xref:../osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc#creating-a-gcp-psc-enabled-private-cluster[Private Service Connect overview]. * For information about configuring a proxy with {product-title}, see xref:../networking/ovn_kubernetes_network_provider/configuring-cluster-wide-proxy.adoc#configuring-a-cluster-wide-proxy[Configuring a cluster-wide proxy]. diff --git a/osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc b/osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc index 1fdb3f146139..866218ae2e97 100644 --- a/osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc +++ b/osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY -[id="osd-creating-a-cluster-on-gcp-with-workload-identity-federation"] -= Creating a cluster on GCP with Workload Identity Federation authentication include::_attributes/attributes-openshift-dedicated.adoc[] +[id="osd-creating-a-cluster-on-gcp-with-workload-identity-federation"] += Creating a cluster on {gcp-full} with Workload Identity Federation authentication :context: osd-creating-a-cluster-on-gcp-with-workload-identity-federation toc::[] diff --git a/osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc b/osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc index 6b622ddbc764..1913f8d741ce 100644 --- a/osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc +++ b/osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc @@ -16,7 +16,7 @@ 
include::modules/private-service-connect-psc-architecture.adoc[leveloffset=+1] == Next steps * To learn more about {product-title} on {GCP} cluster prerequisites, see xref:../osd_planning/gcp-ccs.adoc#ccs-gcp-customer-requirements_gcp-ccs[Customer Requirements]. -* To configure your firewalls, see xref:../osd_planning/gcp-ccs.adoc#osd-gcp-psc-firewall-prerequisites_gcp-ccs[GCP firewall prerequisites]. +* To configure your firewalls, see xref:../osd_planning/gcp-ccs.adoc#osd-gcp-psc-firewall-prerequisites_gcp-ccs[{gcp-full} firewall prerequisites]. * To create an {product-title} on {GCP} using PSC with the Workload Identity Federation authentication type, see - xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on GCP with Workload Identity Federation authentication]. + xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on {gcp-full} with Workload Identity Federation authentication]. 
diff --git a/osd_gcp_clusters/osd-deleting-a-cluster-gcp.adoc b/osd_gcp_clusters/osd-deleting-a-cluster-gcp.adoc index 56ad26066b60..4d2610b2b6bf 100644 --- a/osd_gcp_clusters/osd-deleting-a-cluster-gcp.adoc +++ b/osd_gcp_clusters/osd-deleting-a-cluster-gcp.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY -[id="osd-deleting-a-cluster"] -= Deleting an {product-title} cluster on GCP include::_attributes/attributes-openshift-dedicated.adoc[] +[id="osd-deleting-a-cluster"] += Deleting an {product-title} cluster on {gcp-full} :context: osd-deleting-a-cluster-gcp toc::[] diff --git a/osd_getting_started/osd-getting-started.adoc b/osd_getting_started/osd-getting-started.adoc index 33a90c6b9bb2..93ed9869fb3a 100644 --- a/osd_getting_started/osd-getting-started.adoc +++ b/osd_getting_started/osd-getting-started.adoc @@ -23,15 +23,15 @@ You can install {product-title} in your own cloud provider account through the C Choose from one of the following methods to deploy your cluster. [id="osd-getting-started-create-cluster-gcp-ccs"] -=== Creating a cluster on GCP using the CCS model +=== Creating a cluster on {gcp-full} using the CCS model -You can install {product-title} in your own {GCP} account by using the CCS model. Complete the steps in one of the following sections to deploy {product-title} in your own GCP account. +You can install {product-title} in your own {GCP} account by using the CCS model. Complete the steps in one of the following sections to deploy {product-title} in your own {gcp-full} account. -* Red Hat recommends using GCP Workload Identity Federation (WIF) as the authentication type for installing and interacting with the {product-title} cluster deployed on GCP because it provides enhanced security. For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on GCP with Workload Identity Federation authentication]. 
+* Red Hat recommends using {gcp-full} Workload Identity Federation (WIF) as the authentication type for installing and interacting with the {product-title} cluster deployed on {gcp-full} because it provides enhanced security. For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on {gcp-full} with Workload Identity Federation authentication]. -* Red Hat also recommends creating an {product-title} cluster deployed on GCP in Private cluster mode with Private Service Connect (PSC) to manage and monitor a cluster to avoid all public ingress network traffic. For more information, see xref:../osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc#creating-a-gcp-psc-enabled-private-cluster[Private Service Connect overview]. +* Red Hat also recommends creating an {product-title} cluster deployed on {gcp-full} in Private cluster mode with Private Service Connect (PSC) to manage and monitor a cluster to avoid all public ingress network traffic. For more information, see xref:../osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc#creating-a-gcp-psc-enabled-private-cluster[Private Service Connect overview]. -* For installing and interacting with the {product-title} cluster deployed on GCP by using the Service Account authentication type, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp-sa[Creating a cluster on GCP with Service Account authentication]. +* For installing and interacting with the {product-title} cluster deployed on {gcp-full} by using the Service Account authentication type, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp-sa[Creating a cluster on {gcp-full} with Service Account authentication]. 
[id="osd-getting-started-create-cluster-aws-ccs"] === Creating a cluster on AWS using the CCS model @@ -45,7 +45,7 @@ You can install {product-title} in your own {AWS} account by using the CCS model Complete the steps in one of the following sections to deploy {product-title} in a cloud account that is owned by Red Hat: -* xref:../osd_gcp_clusters/creating-a-gcp-cluster-redhat-account.adoc#creating-a-gcp-cluster-rh-account[Creating a cluster on GCP with a Red Hat cloud account]: You can install {product-title} in an GCP account that is owned by Red Hat. +* xref:../osd_gcp_clusters/creating-a-gcp-cluster-redhat-account.adoc#creating-a-gcp-cluster-rh-account[Creating a cluster on {gcp-full} with a Red Hat cloud account]: You can install {product-title} in a {gcp-full} account that is owned by Red Hat. * xref:../osd_aws_clusters/creating-an-aws-cluster.adoc#osd-create-aws-cluster-red-hat-account_osd-creating-a-cluster-on-aws[Creating a cluster on AWS]: You can install {product-title} in an AWS account that is owned by Red Hat. // Update link when OSDOCS-12950 goes live. @@ -91,7 +91,7 @@ include::modules/deleting-cluster.adoc[leveloffset=+1] * For more information about deploying {product-title} clusters on AWS, see xref:../osd_aws_clusters/creating-an-aws-cluster.adoc#osd-create-aws-cluster-ccs_osd-creating-a-cluster-on-aws[Creating a cluster on AWS]. 
+* For more information about deploying {product-title} clusters on {gcp-full}, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-sa.adoc#osd-creating-a-cluster-on-gcp-sa[Creating a cluster on {gcp-full} with Service Account authentication] and xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on {gcp-full} with Workload Identity Federation authentication]. * For documentation on upgrading your cluster, see xref:../upgrading/osd-upgrades.adoc#osd-upgrades[{product-title} cluster upgrades]. diff --git a/osd_getting_started/osd-understanding-your-cloud-deployment-options.adoc b/osd_getting_started/osd-understanding-your-cloud-deployment-options.adoc index 09da650af3d6..9f9174ab8ff6 100644 --- a/osd_getting_started/osd-understanding-your-cloud-deployment-options.adoc +++ b/osd_getting_started/osd-understanding-your-cloud-deployment-options.adoc @@ -19,7 +19,7 @@ include::modules/understanding-clusters.adoc[leveloffset=+1] [id="additional-resources-cloud-deploy_{context}"] == Additional resources -* For more information about using Customer Cloud Subscriptions on GCP, see xref:../osd_planning/gcp-ccs.adoc#ccs-gcp-understand[Understanding Customer Cloud Subscriptions on GCP]. +* For more information about using Customer Cloud Subscriptions on {gcp-full}, see xref:../osd_planning/gcp-ccs.adoc#ccs-gcp-understand[Understanding Customer Cloud Subscriptions on {gcp-full}]. * For more information about using Customer Cloud Subscriptions on AWS, see xref:../osd_planning/aws-ccs.adoc#ccs-aws-understand[Understanding Customer Cloud Subscriptions on AWS]. 
diff --git a/osd_planning/gcp-ccs.adoc b/osd_planning/gcp-ccs.adoc index d6a5be438bcb..f528ae14c875 100644 --- a/osd_planning/gcp-ccs.adoc +++ b/osd_planning/gcp-ccs.adoc @@ -1,13 +1,13 @@ :_mod-docs-content-type: ASSEMBLY +include::_attributes/attributes-openshift-dedicated.adoc[] [id="gcp-ccs"] -= Customer Cloud Subscriptions on GCP += Customer Cloud Subscriptions on {gcp-full} :context: gcp-ccs -include::_attributes/attributes-openshift-dedicated.adoc[] toc::[] [role="_abstract"] -{product-title} provides a Customer Cloud Subscription (CCS) model that allows Red Hat to deploy and manage clusters in a customer’s existing Google Cloud Platform (GCP) account. +{product-title} provides a Customer Cloud Subscription (CCS) model that allows Red Hat to deploy and manage clusters in a customer's existing {gcp-full} account. include::modules/ccs-gcp-understand.adoc[leveloffset=+1] include::modules/ccs-gcp-customer-requirements.adoc[leveloffset=+1] @@ -24,6 +24,6 @@ include::modules/osd-gcp-psc-firewall-prerequisites.adoc[leveloffset=+1] * xref:../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] -* For more information about creating an {product-title} cluster with the Workload Identity Federation (WIF) authentication type, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on GCP with Workload Identity Federation authentication]. +* For more information about creating an {product-title} cluster with the Workload Identity Federation (WIF) authentication type, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc#osd-creating-a-cluster-on-gcp-with-workload-identity-federation[Creating a cluster on {gcp-full} with Workload Identity Federation authentication]. 
* For more information about the specific roles and permissions that are specific to clusters created when using the Workload Identity Federation (WIF) authentication type, see link:https://github.com/openshift/managed-cluster-config/blob/master/resources/wif/4.19/vanilla.yaml[managed-cluster-config]. diff --git a/osd_whats_new/osd-whats-new.adoc b/osd_whats_new/osd-whats-new.adoc index b0227d010a6b..20e01e7e7730 100644 --- a/osd_whats_new/osd-whats-new.adoc +++ b/osd_whats_new/osd-whats-new.adoc @@ -60,7 +60,7 @@ For more information about migrating to OVN-Kubernetes, see xref:../networking/o [id="osd-q1-2025_{context}"] === Q1 2025 -* **Support for new GCP instances.** {product-title} version 4.18 and later now supports `n4` and `c3` instance types on Google Cloud Platform. For more information, see xref:../osd_architecture/osd_policy/osd-service-definition.adoc#gcp-compute-types_osd-service-definition[Google Cloud compute types]. +* **Support for new {gcp-full} instances.** {product-title} version 4.18 and later now supports `n4` and `c3` instance types on {gcp-full}. For more information, see xref:../osd_architecture/osd_policy/osd-service-definition.adoc#gcp-compute-types_osd-service-definition[Google Cloud compute types]. * **New version of {product-title} available.** {product-title} on {gcp} and {product-title} on {aws} versions 4.18 are now available. For more information about upgrading to this latest version, see xref:../upgrading/osd-upgrades.adoc#osd-upgrades[Red Hat OpenShift Dedicated cluster upgrades]. 
* **Support for assigning newly created machine pools to specific availability zones within a Multi-AZ cluster.** @@ -83,28 +83,28 @@ For more information about migrating to OVN-Kubernetes, see xref:../networking/o [id="osd-q4-2024_{context}"] === Q4 2024 -* **Workload Identity Federation (WIF) authentication type is now available.** {product-title} on Google Cloud Platform (GCP) customers can now use WIF as an authentication type when creating a cluster. WIF is a GCP Identity and Access Management (IAM) feature that provides third parties a secure method to access resources on a customer's cloud account. +* **Workload Identity Federation (WIF) authentication type is now available.** {product-title} on {gcp-full} customers can now use WIF as an authentication type when creating a cluster. WIF is a {gcp-full} Identity and Access Management (IAM) feature that provides third parties a secure method to access resources on a customer's cloud account. WIF is Google Cloud's preferred method for credential authentication. + For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster-with-workload-identity-federation.adoc[Creating a cluster on GCP with Workload Identity Federation authentication]. -* **Private Service Connect (PSC) networking feature is now available.** You can now create a private {product-title} cluster on Google Cloud Platform (GCP) using Google Cloud's security-enhanced networking feature Private Service Connect (PSC). +* **Private Service Connect (PSC) networking feature is now available.** You can now create a private {product-title} cluster on {gcp-full} using Google Cloud's security-enhanced networking feature Private Service Connect (PSC). + -PSC is a capability of Google Cloud networking that enables private communication between services across different GCP projects or organizations. 
Implementing PSC as part of your network connectivity allows you to deploy OpenShift Dedicated clusters in a private and secured environment within GCP without using any public-facing cloud resources. +PSC is a capability of Google Cloud networking that enables private communication between services across different {gcp-full} projects or organizations. Implementing PSC as part of your network connectivity allows you to deploy OpenShift Dedicated clusters in a private and secured environment within {gcp-full} without using any public-facing cloud resources. + For more information, see xref:../osd_gcp_clusters/creating-a-gcp-psc-enabled-private-cluster.adoc#creating-a-gcp-psc-enabled-private-cluster[Private Service Connect overview]. -* ** Support for GCP A3 instances with NVIDIA H100 80GB GPUs.** {product-title} on {GCP} now supports A3 instance types with NVIDIA H100 80GB GPUs. The GCP A3 instance type is available in all three zones of a GCP region, which is a prerequisite for multi-AZ deployment. For more information, see xref:../osd_architecture/osd_policy/osd-service-definition.adoc#gcp-compute-types_osd-service-definition[Google Cloud compute types]. +* **Support for {gcp-full} A3 instances with NVIDIA H100 80GB GPUs.** {product-title} on {GCP} now supports A3 instance types with NVIDIA H100 80GB GPUs. The {gcp-full} A3 instance type is available in all three zones of a {gcp-full} region, which is a prerequisite for multi-AZ deployment. For more information, see xref:../osd_architecture/osd_policy/osd-service-definition.adoc#gcp-compute-types_osd-service-definition[Google Cloud compute types]. [id="osd-q3-2024_{context}"] === Q3 2024 -* ** Support for GCP A2 instance types with A100 80GB GPUs.** {product-title} on {GCP} now supports A2 instance types with A100 80GB GPUs. These instance types meet the specific requirements listed by IBM Watsonx.ai. 
For more information, see xref:../osd_architecture/osd_policy/osd-service-definition.adoc#gcp-compute-types_osd-service-definition[Google Cloud compute types]. +* **Support for {gcp-full} A2 instance types with A100 80GB GPUs.** {product-title} on {GCP} now supports A2 instance types with A100 80GB GPUs. These instance types meet the specific requirements listed by IBM Watsonx.ai. For more information, see xref:../osd_architecture/osd_policy/osd-service-definition.adoc#gcp-compute-types_osd-service-definition[Google Cloud compute types]. -* **Expanded support for GCP standard instance types.** {product-title} on {GCP} now supports standard instance types for control plane and infrastructure nodes. +* **Expanded support for {gcp-full} standard instance types.** {product-title} on {GCP} now supports standard instance types for control plane and infrastructure nodes. For more information, see xref:../osd_planning/osd-limits-scalability.adoc#control-plane-and-infra-node-sizing-and-scaling-sd_osd-limits-scalability[Control plane and infrastructure node sizing and scaling]. * **{product-title} regions added.** {product-title} on {GCP} is now available in the following additional regions: @@ -126,11 +126,11 @@ For more information about region availabilities, see xref:../osd_architecture/o * **Cluster delete protection.** {product-title} on {GCP} users can now enable the cluster delete protection option, which helps to prevent users from accidentally deleting a cluster. //Removed link as is no longer valid. Need to decide if we need a link here and if so, what it will be. -// For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp[Creating a cluster on GCP with CCS]. +// For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp[Creating a cluster on {gcp-full} with CCS]. 
-* **CSI Operator update.** {product-title} is capable of provisioning persistent volumes (PVs) using the Container Storage Interface (CSI) driver for Google Compute Platform (GCP) Filestore Storage. For more information, see xref:../storage/container_storage_interface/persistent-storage-csi-google-cloud-file.adoc#persistent-storage-csi-google-cloud-file-overview[Google Compute Platform Filestore CSI Driver Operator]. +* **CSI Operator update.** {product-title} is capable of provisioning persistent volumes (PVs) using the Container Storage Interface (CSI) driver for Google Compute Platform Filestore Storage. For more information, see xref:../storage/container_storage_interface/persistent-storage-csi-google-cloud-file.adoc#persistent-storage-csi-google-cloud-file-overview[Google Compute Platform Filestore CSI Driver Operator]. -* **Support for new GCP instances.** {product-title} now supports more worker node types and sizes on Google Cloud Platform. For more information, see xref:../osd_architecture/osd_policy/osd-service-definition.adoc#gcp-compute-types_osd-service-definition[Google Cloud compute types]. +* **Support for new {gcp-full} instances.** {product-title} now supports more worker node types and sizes on {gcp-full}. For more information, see xref:../osd_architecture/osd_policy/osd-service-definition.adoc#gcp-compute-types_osd-service-definition[Google Cloud compute types]. [id="osd-q1-2024_{context}"] === Q1 2024 @@ -142,15 +142,15 @@ For more information about region availabilities, see xref:../osd_architecture/o [id="osd-q4-2023_{context}"] === Q4 2023 -* **Policy constraint update.** {product-title} on {GCP} users can now enable UEFISecureBoot during cluster installation, as required by the GCP ShieldVM policy. This new feature adds further protection from boot or kernel-level malware or rootkits. 
+* **Policy constraint update.** {product-title} on {GCP} users can now enable UEFISecureBoot during cluster installation, as required by the {gcp-full} Shielded VM policy. This new feature adds further protection from boot or kernel-level malware or rootkits. * **Cluster install update.** {product-title} clusters can now be installed on {GCP} shared VPCs. //Removed link as is no longer valid. Need to decide if we need a link here and if so, what it will be. -// For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp[Creating a cluster on GCP with CCS]. +// For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp[Creating a cluster on {gcp-full} with CCS]. * **{product-title} on Google Cloud Marketplace availability.** When creating an {product-title} (OSD) cluster on Google Cloud through the Hybrid Cloud Console, customers can now select Google Cloud Marketplace as their preferred billing model. This billing model allows Red Hat customers to take advantage of their link:https://cloud.google.com/docs/cuds[Google Committed Use Discounts (CUD)] towards {product-title} purchased through the Google Cloud Marketplace. //Removed link as is no longer valid. Need to decide if we need a link here and if so, what it will be. -// For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp[Creating a cluster on GCP with CCS]. +// For more information, see xref:../osd_gcp_clusters/creating-a-gcp-cluster.adoc#osd-create-gcp-cluster-ccs_osd-creating-a-cluster-on-gcp[Creating a cluster on {gcp-full} with CCS]. 
[id="osd-known-issues_{context}"] == Known issues diff --git a/post_installation_configuration/changing-cloud-credentials-configuration.adoc b/post_installation_configuration/changing-cloud-credentials-configuration.adoc index 4c14db8e9000..9faa10a1af7a 100644 --- a/post_installation_configuration/changing-cloud-credentials-configuration.adoc +++ b/post_installation_configuration/changing-cloud-credentials-configuration.adoc @@ -71,7 +71,7 @@ include::modules/manually-removing-cloud-creds.adoc[leveloffset=+2] [id="post-install-enable-token-auth_{context}"] == Enabling token-based authentication -//Today, just Entra. But this should be a section that anticipates the addition of AWS STS and GCP WID. +//Today, just Entra. But this should be a section that anticipates the addition of AWS STS and {gcp-full} WID. After installing an {azure-first} {product-title} cluster, you can enable {entra-first} to use short-term credentials. diff --git a/post_installation_configuration/configuring-multi-arch-compute-machines/creating-multi-arch-compute-nodes-gcp.adoc b/post_installation_configuration/configuring-multi-arch-compute-machines/creating-multi-arch-compute-nodes-gcp.adoc index 0fcbbb778fd8..e3a280013e0c 100644 --- a/post_installation_configuration/configuring-multi-arch-compute-machines/creating-multi-arch-compute-nodes-gcp.adoc +++ b/post_installation_configuration/configuring-multi-arch-compute-machines/creating-multi-arch-compute-nodes-gcp.adoc @@ -1,12 +1,12 @@ :_mod-docs-content-type: ASSEMBLY +include::_attributes/common-attributes.adoc[] :context: creating-multi-arch-compute-nodes-gcp [id="creating-multi-arch-compute-nodes-gcp"] -= Creating a cluster with multi-architecture compute machines on GCP -include::_attributes/common-attributes.adoc[] += Creating a cluster with multi-architecture compute machines on {gcp-full} toc::[] -To create a Google Cloud Platform (GCP) cluster with multi-architecture compute machines, you must first create a single-architecture GCP 
installer-provisioned cluster with the multi-architecture installer binary. For more information on AWS installations, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc[Installing a cluster on GCP with customizations]. +To create a {gcp-full} cluster with multi-architecture compute machines, you must first create a single-architecture {gcp-full} installer-provisioned cluster with the multi-architecture installer binary. For more information on {gcp-full} installations, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc[Installing a cluster on {gcp-full} with customizations]. You can also migrate your current cluster with single-architecture compute machines to a cluster with multi-architecture compute machines. For more information, see xref:../../updating/updating_a_cluster/migrating-to-multi-payload.adoc#migrating-to-multi-payload[Migrating to a cluster with multi-architecture compute machines]. @@ -14,7 +14,7 @@ After creating a multi-architecture cluster, you can add nodes with different ar [NOTE] ==== -Secure booting is currently not supported on 64-bit ARM machines for GCP +Secure booting is currently not supported on 64-bit ARM machines for {gcp-full} ==== include::modules/multi-architecture-verifying-cluster-compatibility.adoc[leveloffset=+1] @@ -23,6 +23,6 @@ include::modules/multi-architecture-modify-machine-set-gcp.adoc[leveloffset=+1] .Additional resources -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installation-gcp-tested-machine-types-arm_installing-gcp-customizations[Tested instance types for GCP on 64-bit ARM infrastructures] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installation-gcp-tested-machine-types-arm_installing-gcp-customizations[Tested instance types for {gcp-full} on 64-bit ARM infrastructures] * 
xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/multiarch-tuning-operator.adoc#multiarch-tuning-operator[Managing workloads on multi-architecture clusters by using the Multiarch Tuning Operator] \ No newline at end of file diff --git a/post_installation_configuration/configuring-multi-arch-compute-machines/multi-architecture-configuration.adoc b/post_installation_configuration/configuring-multi-arch-compute-machines/multi-architecture-configuration.adoc index ad14c4efb54b..a215a4506dbc 100644 --- a/post_installation_configuration/configuring-multi-arch-compute-machines/multi-architecture-configuration.adoc +++ b/post_installation_configuration/configuring-multi-arch-compute-machines/multi-architecture-configuration.adoc @@ -43,8 +43,8 @@ To create a cluster with multi-architecture compute machines with different inst |`aarch64` or `x86_64` |`aarch64`, `x86_64` -|xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/creating-multi-arch-compute-nodes-gcp.adoc#creating-multi-arch-compute-nodes-gcp[Creating a cluster with multi-architecture compute machines on GCP] -|Google Cloud Platform (GCP) +|xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/creating-multi-arch-compute-nodes-gcp.adoc#creating-multi-arch-compute-nodes-gcp[Creating a cluster with multi-architecture compute machines on {gcp-full}] +|{gcp-full} | |✓ |`aarch64` or `x86_64` @@ -94,5 +94,5 @@ To create a cluster with multi-architecture compute machines with different inst [IMPORTANT] ==== -Autoscaling from zero is currently not supported on Google Cloud Platform (GCP). +Autoscaling from zero is currently not supported on {gcp-full}. 
==== diff --git a/registry/configuring-registry-operator.adoc b/registry/configuring-registry-operator.adoc index fbc66f4d161f..1cd2db2a8fd1 100644 --- a/registry/configuring-registry-operator.adoc +++ b/registry/configuring-registry-operator.adoc @@ -19,9 +19,9 @@ The Image Registry Operator installs a single instance of the {product-registry} ifndef::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] [NOTE] ==== -Storage is only automatically configured when you install an installer-provisioned infrastructure cluster on AWS, Azure, GCP, {ibm-name}, or OpenStack. +Storage is only automatically configured when you install an installer-provisioned infrastructure cluster on AWS, Azure, {gcp-full}, {ibm-name}, or OpenStack. -When you install or upgrade an installer-provisioned infrastructure cluster on AWS, Azure, GCP, {ibm-name}, or OpenStack, the Image Registry Operator sets the `spec.storage.managementState` parameter to `Managed`. If the `spec.storage.managementState` parameter is set to `Unmanaged`, the Image Registry Operator takes no action related to storage. +When you install or upgrade an installer-provisioned infrastructure cluster on AWS, Azure, {gcp-full}, {ibm-name}, or OpenStack, the Image Registry Operator sets the `spec.storage.managementState` parameter to `Managed`. If the `spec.storage.managementState` parameter is set to `Unmanaged`, the Image Registry Operator takes no action related to storage. 
==== endif::openshift-dedicated,openshift-rosa,openshift-rosa-hcp[] @@ -75,7 +75,7 @@ include::modules/registry-operator-config-resources-storage-credentials.adoc[lev == Additional resources * xref:../registry/configuring_registry_storage/configuring-registry-storage-aws-user-infrastructure.adoc#configuring-registry-storage-aws-user-infrastructure[Configuring the registry for AWS user-provisioned infrastructure] -* xref:../registry/configuring_registry_storage/configuring-registry-storage-gcp-user-infrastructure.adoc#configuring-registry-storage-gcp-user-infrastructure[Configuring the registry for GCP user-provisioned infrastructure] +* xref:../registry/configuring_registry_storage/configuring-registry-storage-gcp-user-infrastructure.adoc#configuring-registry-storage-gcp-user-infrastructure[Configuring the registry for {gcp-full} user-provisioned infrastructure] * xref:../registry/configuring_registry_storage/configuring-registry-storage-azure-user-infrastructure.adoc#configuring-registry-storage-azure-user-infrastructure[Configuring the registry for Azure user-provisioned infrastructure] * xref:../registry/configuring_registry_storage/configuring-registry-storage-baremetal.adoc#configuring-registry-storage-baremetal[Configuring the registry for bare metal] * xref:../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Configuring the registry for vSphere] diff --git a/registry/configuring_registry_storage/configuring-registry-storage-gcp-user-infrastructure.adoc b/registry/configuring_registry_storage/configuring-registry-storage-gcp-user-infrastructure.adoc index c2ebe1dd7553..c5d181041a7a 100644 --- a/registry/configuring_registry_storage/configuring-registry-storage-gcp-user-infrastructure.adoc +++ b/registry/configuring_registry_storage/configuring-registry-storage-gcp-user-infrastructure.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY 
-[id="configuring-registry-storage-gcp-user-infrastructure"] -= Configuring the registry for GCP user-provisioned infrastructure include::_attributes/common-attributes.adoc[] +[id="configuring-registry-storage-gcp-user-infrastructure"] += Configuring the registry for {gcp-full} user-provisioned infrastructure :context: configuring-registry-storage-gcp-user-infrastructure toc::[] diff --git a/rest_api/config_apis/infrastructure-config-openshift-io-v1.adoc b/rest_api/config_apis/infrastructure-config-openshift-io-v1.adoc index 6146375f6195..d095305a004c 100644 --- a/rest_api/config_apis/infrastructure-config-openshift-io-v1.adoc +++ b/rest_api/config_apis/infrastructure-config-openshift-io-v1.adoc @@ -122,7 +122,7 @@ Type:: | `name` | `string` -| +| |=== === .spec.platformSpec @@ -2911,7 +2911,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../config_apis/infrastructure-config-openshift-io-v1.adoc#infrastructure-config-openshift-io-v1[`Infrastructure`] schema -| +| |=== .HTTP responses @@ -3044,7 +3044,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../config_apis/infrastructure-config-openshift-io-v1.adoc#infrastructure-config-openshift-io-v1[`Infrastructure`] schema -| +| |=== .HTTP responses @@ -3146,7 +3146,7 @@ Description:: | Parameter | Type | Description | `body` | xref:../config_apis/infrastructure-config-openshift-io-v1.adoc#infrastructure-config-openshift-io-v1[`Infrastructure`] schema -| +| |=== .HTTP responses diff --git a/security/cert_manager_operator/cert-manager-authenticate.adoc b/security/cert_manager_operator/cert-manager-authenticate.adoc index 9849b14d71ed..a633a57ac86d 100644 --- a/security/cert_manager_operator/cert-manager-authenticate.adoc +++ b/security/cert_manager_operator/cert-manager-authenticate.adoc @@ -20,10 +20,10 @@ include::modules/cert-manager-configure-cloud-credentials-aws-sts.adoc[leveloffs * 
xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#cco-ccoctl-configuring_installing-aws-customizations[Configuring the Cloud Credential Operator utility] -// on GCP +// on {gcp-full} include::modules/cert-manager-configure-cloud-credentials-gcp-non-sts.adoc[leveloffset=+1] -// with GCP Workload Identity +// with {gcp-full} Workload Identity include::modules/cert-manager-configure-cloud-credentials-gcp-sts.adoc[leveloffset=+1] [role="_additional-resources"] diff --git a/security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc b/security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc index c5a38690acfa..b125b982bef6 100644 --- a/security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc +++ b/security/cert_manager_operator/cert-manager-operator-issuer-acme.adoc @@ -48,10 +48,10 @@ include::modules/cert-manager-acme-dns01-explicit-aws.adoc[leveloffset=+1] //Configuring an ACME issuer to solve DNS01 challenges by using ambient credentials on AWS include::modules/cert-manager-acme-dns01-ambient-aws.adoc[leveloffset=+1] -// Configuring an ACME issuer to solve DNS01 challenges by using explicit credentials on GCP +// Configuring an ACME issuer to solve DNS01 challenges by using explicit credentials on {gcp-full} include::modules/cert-manager-acme-dns01-explicit-gcp.adoc[leveloffset=+1] -// Configuring an ACME issuer to solve DNS01 challenges by using ambient credentials on GCP +// Configuring an ACME issuer to solve DNS01 challenges by using ambient credentials on {gcp-full} include::modules/cert-manager-acme-dns01-ambient-gcp.adoc[leveloffset=+1] // Configuring an ACME issuer to solve DNS01 challenges by using explicit credentials on Microsoft Azure @@ -65,6 +65,6 @@ include::modules/cert-manager-acme-dns01-explicit-azure.adoc[leveloffset=+1] * xref:../../security/cert_manager_operator/cert-manager-authenticate.adoc#cert-manager-configure-cloud-credentials-aws-non-sts_cert-manager-authenticate[Configuring 
cloud credentials for the {cert-manager-operator} on AWS] -* xref:../../security/cert_manager_operator/cert-manager-authenticate.adoc#cert-manager-configure-cloud-credentials-gcp-sts_cert-manager-authenticate[Configuring cloud credentials for the {cert-manager-operator} with GCP Workload Identity] +* xref:../../security/cert_manager_operator/cert-manager-authenticate.adoc#cert-manager-configure-cloud-credentials-gcp-sts_cert-manager-authenticate[Configuring cloud credentials for the {cert-manager-operator} with {gcp-full} Workload Identity] -* xref:../../security/cert_manager_operator/cert-manager-authenticate.adoc#cert-manager-configure-cloud-credentials-gcp-non-sts_cert-manager-authenticate[Configuring cloud credentials for the {cert-manager-operator} on GCP] \ No newline at end of file +* xref:../../security/cert_manager_operator/cert-manager-authenticate.adoc#cert-manager-configure-cloud-credentials-gcp-non-sts_cert-manager-authenticate[Configuring cloud credentials for the {cert-manager-operator} on {gcp-full}] \ No newline at end of file diff --git a/snippets/distr-tracing-tempo-required-secret-parameters.adoc b/snippets/distr-tracing-tempo-required-secret-parameters.adoc index 8ae477e11235..7f5a7693a059 100644 --- a/snippets/distr-tracing-tempo-required-secret-parameters.adoc +++ b/snippets/distr-tracing-tempo-required-secret-parameters.adoc @@ -71,12 +71,12 @@ See link:https://operator.min.io/[MinIO Operator]. 
`account_key: ` -|Google Cloud Storage on Google Cloud Platform (GCP) +|Google Cloud Storage on {gcp-full} | `name: tempostack-dev-gcs # example` `bucketname: # requires a link:https://cloud.google.com/storage/docs/creating-buckets[bucket] created in a link:https://cloud.google.com/resource-manager/docs/creating-managing-projects[GCP project]` -`key.json: # requires a link:https://cloud.google.com/docs/authentication/getting-started#creating_a_service_account[service account] in the bucket's GCP project for GCP authentication` +`key.json: # requires a link:https://cloud.google.com/docs/authentication/getting-started#creating_a_service_account[service account] in the bucket's {gcp-full} project for {gcp-full} authentication` |=== diff --git a/storage/container_storage_interface/persistent-storage-csi-gcp-pd.adoc b/storage/container_storage_interface/persistent-storage-csi-gcp-pd.adoc index 8443e60b6de2..fb9adad9ff3a 100644 --- a/storage/container_storage_interface/persistent-storage-csi-gcp-pd.adoc +++ b/storage/container_storage_interface/persistent-storage-csi-gcp-pd.adoc @@ -1,28 +1,28 @@ :_mod-docs-content-type: ASSEMBLY -[id="persistent-storage-csi-gcp-pd"] -= GCP PD CSI Driver Operator include::_attributes/common-attributes.adoc[] include::_attributes/attributes-openshift-dedicated.adoc[] +[id="persistent-storage-csi-gcp-pd"] += {gcp-full} PD CSI Driver Operator :context: persistent-storage-csi-gcp-pd toc::[] == Overview -{product-title} can provision persistent volumes (PVs) using the Container Storage Interface (CSI) driver for Google Cloud Platform (GCP) persistent disk (PD) storage. +{product-title} can provision persistent volumes (PVs) using the Container Storage Interface (CSI) driver for {gcp-full} persistent disk (PD) storage. 
:FeatureName: GCP PD CSI Driver Operator Familiarity with xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] and xref:../../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-csi[configuring CSI volumes] is recommended when working with a Container Storage Interface (CSI) Operator and driver. -To create CSI-provisioned persistent volumes (PVs) that mount to GCP PD storage assets, {product-title} installs the GCP PD CSI Driver Operator and the GCP PD CSI driver by default in the `openshift-cluster-csi-drivers` namespace. +To create CSI-provisioned persistent volumes (PVs) that mount to {gcp-full} PD storage assets, {product-title} installs the {gcp-full} PD CSI Driver Operator and the {gcp-full} PD CSI driver by default in the `openshift-cluster-csi-drivers` namespace. -* *GCP PD CSI Driver Operator*: By default, the Operator provides a storage class that you can use to create PVCs. You can disable this default storage class if desired (see xref:../../storage/container_storage_interface/persistent-storage-csi-sc-manage.adoc#persistent-storage-csi-sc-manage[Managing the default storage class]). You also have the option to create the GCP PD storage class as described in xref:../../storage/persistent_storage/persistent-storage-gce.adoc#persistent-storage-using-gce[Persistent storage using GCE Persistent Disk]. +* *{gcp-full} PD CSI Driver Operator*: By default, the Operator provides a storage class that you can use to create PVCs. You can disable this default storage class if desired (see xref:../../storage/container_storage_interface/persistent-storage-csi-sc-manage.adoc#persistent-storage-csi-sc-manage[Managing the default storage class]). You also have the option to create the {gcp-full} PD storage class as described in xref:../../storage/persistent_storage/persistent-storage-gce.adoc#persistent-storage-using-gce[Persistent storage using GCE Persistent Disk]. 
-* *GCP PD driver*: The driver enables you to create and mount GCP PD PVs. +* *{gcp-full} PD driver*: The driver enables you to create and mount {gcp-full} PD PVs. // + -GCP PD CSI driver supports the C3 instance type for bare metal and N4 machine series. The C3 instance type and N4 machine series support the hyperdisk-balanced disks. +{gcp-full} PD CSI driver supports the C3 instance type for bare metal and N4 machine series. The C3 instance type and N4 machine series support the hyperdisk-balanced disks. ifndef::openshift-dedicated[] [NOTE] ==== @@ -45,7 +45,7 @@ ifndef::openshift-dedicated[] [id="resources-for-gcp-c3-n4-instances"] [role="_additional-resources"] === Additional resources -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on GCP with customizations] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installing-gcp-customizations[Installing a cluster on {gcp-full} with customizations] endif::openshift-dedicated[] include::modules/persistent-storage-csi-about.adoc[leveloffset=+1] @@ -57,7 +57,7 @@ include::modules/persistent-storage-csi-gcp-pd-encrypted-pv.adoc[leveloffset=+1] ifndef::openshift-rosa,openshift-dedicated[] include::modules/persistent-storage-byok.adoc[leveloffset=+1] -For information about installing with user-managed encryption for GCP PD, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installation-configuration-parameters_installing-gcp-customizations[Installation configuration parameters]. +For information about installing with user-managed encryption for {gcp-full} PD, see xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installation-configuration-parameters_installing-gcp-customizations[Installation configuration parameters]. 
endif::openshift-rosa,openshift-dedicated[] [id="resources-for-gcp"] diff --git a/storage/container_storage_interface/persistent-storage-csi-google-cloud-file.adoc b/storage/container_storage_interface/persistent-storage-csi-google-cloud-file.adoc index f9bbb6420d5c..0ad511495fd7 100644 --- a/storage/container_storage_interface/persistent-storage-csi-google-cloud-file.adoc +++ b/storage/container_storage_interface/persistent-storage-csi-google-cloud-file.adoc @@ -10,7 +10,7 @@ toc::[] [id="persistent-storage-csi-google-cloud-file-overview"] == Overview -{product-title} is capable of provisioning persistent volumes (PVs) using the Container Storage Interface (CSI) driver for Google Compute Platform (GCP) Filestore Storage. +{product-title} is capable of provisioning persistent volumes (PVs) using the Container Storage Interface (CSI) driver for {gcp-full} Filestore Storage. Familiarity with xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] and xref:../../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver. @@ -20,7 +20,7 @@ To create CSI-provisioned PVs that mount to {gcp-short} Filestore Storage assets * The _{gcp-short} Filestore CSI driver_ enables you to create and mount {gcp-short} Filestore PVs. -{product-title} {gcp-short} Filestore supports Workload Identity. This allows users to access Google Cloud resources using federated identities instead of a service account key. {gcp-wid-short} must be enabled globally during installation, and then configured for the {gcp-short} Filestore CSI Driver Operator. +{product-title} {gcp-short} Filestore supports Workload Identity. This allows users to access Google Cloud resources using federated identities instead of a service account key. 
{gcp-wid-short} must be enabled globally during installation, and then configured for the {gcp-short} Filestore CSI Driver Operator. For more information, see xref:../../storage/container_storage_interface/persistent-storage-csi-google-cloud-file.adoc#installing-the-gcp-filestore-csi-driver-operator[Installing the {gcp-short} Filestore CSI Driver Operator]. include::modules/persistent-storage-csi-about.adoc[leveloffset=+1] diff --git a/storage/container_storage_interface/persistent-storage-csi-sc-manage.adoc b/storage/container_storage_interface/persistent-storage-csi-sc-manage.adoc index 5f2b9cb35849..1514adadb66d 100644 --- a/storage/container_storage_interface/persistent-storage-csi-sc-manage.adoc +++ b/storage/container_storage_interface/persistent-storage-csi-sc-manage.adoc @@ -33,7 +33,7 @@ Managing the default storage classes is supported by the following Container Sto * xref:../../storage/container_storage_interface/persistent-storage-csi-azure-file.adoc#persistent-storage-csi-azure-file[Azure File] -* xref:../../storage/container_storage_interface/persistent-storage-csi-gcp-pd.adoc#persistent-storage-csi-gcp-pd[Google Cloud Platform (GCP) Persistent Disk (PD)] +* xref:../../storage/container_storage_interface/persistent-storage-csi-gcp-pd.adoc#persistent-storage-csi-gcp-pd[{gcp-full} Persistent Disk (PD)] * xref:../../storage/container_storage_interface/persistent-storage-csi-ibm-cloud-vpc-block.adoc#persistent-storage-csi-ibm-cloud-vpc-block[{ibm-cloud-name} VPC Block] diff --git a/support/troubleshooting/troubleshooting-osd-gcp-cluster-deployment.adoc b/support/troubleshooting/troubleshooting-osd-gcp-cluster-deployment.adoc index 0822293c1edc..0c0cb4753fd0 100644 --- a/support/troubleshooting/troubleshooting-osd-gcp-cluster-deployment.adoc +++ b/support/troubleshooting/troubleshooting-osd-gcp-cluster-deployment.adoc @@ -1,7 +1,7 @@ :_mod-docs-content-type: ASSEMBLY -[id="troubleshooting-osd-gcp-cluster-deployment"] -= Troubleshooting an {product-title} 
on GCP cluster deployment include::_attributes/common-attributes.adoc[] +[id="troubleshooting-osd-gcp-cluster-deployment"] += Troubleshooting an {product-title} on {gcp-full} cluster deployment :context: troubleshooting-osd-gcp-cluster-deployment toc::[] diff --git a/updating/preparing_for_updates/preparing-manual-creds-update.adoc b/updating/preparing_for_updates/preparing-manual-creds-update.adoc index 9f55410e7f4a..e13c9054e61b 100644 --- a/updating/preparing_for_updates/preparing-manual-creds-update.adoc +++ b/updating/preparing_for_updates/preparing-manual-creds-update.adoc @@ -64,7 +64,7 @@ include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+1] * xref:../../installing/installing_aws/ipi/installing-aws-customizations.adoc#manually-create-iam_installing-aws-customizations[Manually creating long-term credentials for AWS] * xref:../../installing/installing_azure/ipi/installing-azure-customizations.adoc#manually-create-iam_installing-azure-customizations[Manually creating long-term credentials for Azure] * xref:../../installing/installing_azure_stack_hub/ipi/installing-azure-stack-hub-default.adoc#manually-create-iam_installing-azure-stack-hub-default[Manually creating long-term credentials for Azure Stack Hub] -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for GCP] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#manually-create-iam_installing-gcp-customizations[Manually creating long-term credentials for {gcp-full}] * xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#cco-manual-upgrade-annotation_preparing-manual-creds-update[Indicating that the cluster is ready to upgrade] //Indicating that the cluster is ready to upgrade diff --git a/updating/updating_a_cluster/migrating-to-multi-payload.adoc b/updating/updating_a_cluster/migrating-to-multi-payload.adoc index 
ba3f1dae2b83..3ba8225e8e1e 100644 --- a/updating/updating_a_cluster/migrating-to-multi-payload.adoc +++ b/updating/updating_a_cluster/migrating-to-multi-payload.adoc @@ -40,7 +40,7 @@ include::modules/migrating-to-multi-arch-cli.adoc[leveloffset=+1] // Migrating the x86 control plane to the arm64 architecture on AWS include::modules/migrating-from-x86-to-arm-cp.adoc[leveloffset=+1] -// Migrating CP or infra between x86 and arm on GCP +// Migrating CP or infra between x86 and arm on {gcp-full} include::modules/multiarch-migrating-cp-infra-gcp.adoc[leveloffset=+1] [role="_additional-resources"] @@ -50,6 +50,6 @@ include::modules/multiarch-migrating-cp-infra-gcp.adoc[leveloffset=+1] * xref:../../installing/installing_aws/upi/upi-aws-installation-reqs.adoc#installation-aws-arm-tested-machine-types_upi-aws-installation-reqs[Tested instance types for AWS on 64-bit ARM infrastructures] -* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installation-gcp-tested-machine-types-arm_installing-gcp-customizations[Tested instance types for GCP on 64-bit ARM infrastructures] +* xref:../../installing/installing_gcp/installing-gcp-customizations.adoc#installation-gcp-tested-machine-types-arm_installing-gcp-customizations[Tested instance types for {gcp-full} on 64-bit ARM infrastructures] * xref:../../updating/updating_a_cluster/migrating-to-multi-payload.adoc#migrating-to-multi-arch-cli_updating-clusters-overview[Migrating to a cluster with multi-architecture compute machines using the CLI] diff --git a/welcome/glossary.adoc b/welcome/glossary.adoc index e27d61ea877b..e64db2556df3 100644 --- a/welcome/glossary.adoc +++ b/welcome/glossary.adoc @@ -145,7 +145,7 @@ A command line tool to run {product-title} commands on the terminal. ifndef::openshift-dedicated,openshift-rosa[] OpenShift Dedicated:: -A managed {op-system-base} {product-title} offering on Amazon Web Services (AWS) and Google Cloud Platform (GCP). 
OpenShift Dedicated focuses on building and scaling applications. +A managed {op-system-base} {product-title} offering on Amazon Web Services (AWS) and {gcp-full}. OpenShift Dedicated focuses on building and scaling applications. endif::openshift-dedicated,openshift-rosa[] OpenShift Update Service (OSUS):: diff --git a/welcome/openshift-editions.adoc b/welcome/openshift-editions.adoc index 1a651bbe209c..3dd27d1b7a4d 100644 --- a/welcome/openshift-editions.adoc +++ b/welcome/openshift-editions.adoc @@ -19,7 +19,7 @@ For more information, see link:https://www.redhat.com/en/technologies/cloud-comp {azure-first} Red{nbsp}Hat OpenShift:: A fully managed application platform that helps organizations build, deploy, and scale applications on Azure. For more information, see link:https://www.redhat.com/en/technologies/cloud-computing/openshift/azure[{azure-first} Red{nbsp}Hat OpenShift]. -{product-dedicated}:: A managed Red{nbsp}Hat OpenShift offering available on Google Cloud Platform (GCP). +{product-dedicated}:: A managed Red{nbsp}Hat OpenShift offering available on {gcp-full}. For more information, see link:https://www.redhat.com/en/technologies/cloud-computing/openshift/dedicated[{product-dedicated}]. Red{nbsp}Hat OpenShift on {ibm-cloud-title}:: A managed OpenShift cloud service that reduces operational complexity and helps developers build and scale applications on {ibm-cloud-title}. 
diff --git a/windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc b/windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc index 35ed8735e092..45becdab5901 100644 --- a/windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc +++ b/windows_containers/creating_windows_machinesets/creating-windows-machineset-gcp.adoc @@ -1,12 +1,12 @@ :_mod-docs-content-type: ASSEMBLY -[id="creating-windows-machineset-gcp"] -= Creating a Windows machine set on GCP include::_attributes/common-attributes.adoc[] +[id="creating-windows-machineset-gcp"] += Creating a Windows machine set on {gcp-full} :context: creating-windows-machineset-gcp toc::[] -You can create a Windows `MachineSet` object to serve a specific purpose in your {product-title} cluster on Google Cloud Platform (GCP). For example, you might create infrastructure Windows machine sets and related machines so that you can move supporting Windows workloads to the new Windows machines. +You can create a Windows `MachineSet` object to serve a specific purpose in your {product-title} cluster on {gcp-full}. For example, you might create infrastructure Windows machine sets and related machines so that you can move supporting Windows workloads to the new Windows machines. 
== Prerequisites diff --git a/windows_containers/wmco_rn/windows-containers-release-notes-prereqs.adoc b/windows_containers/wmco_rn/windows-containers-release-notes-prereqs.adoc index 3a2a2b0dcbad..fecf3905ece5 100644 --- a/windows_containers/wmco_rn/windows-containers-release-notes-prereqs.adoc +++ b/windows_containers/wmco_rn/windows-containers-release-notes-prereqs.adoc @@ -27,7 +27,7 @@ a|* Windows Server 2022, OS Build link:https://support.microsoft.com/en-us/topic |VMware vSphere |Windows Server 2022, OS Build link:https://support.microsoft.com/en-us/topic/april-25-2022-kb5012637-os-build-20348-681-preview-2233d69c-d4a5-4be9-8c24-04a450861a8d[20348.681] or later -|Google Cloud Platform (GCP) +|{gcp-full} |Windows Server 2022, OS Build link:https://support.microsoft.com/en-us/topic/april-25-2022-kb5012637-os-build-20348-681-preview-2233d69c-d4a5-4be9-8c24-04a450861a8d[20348.681] or later |Nutanix @@ -59,7 +59,7 @@ Hybrid networking with OVN-Kubernetes is the only supported networking configura |VMware vSphere |Hybrid networking with OVN-Kubernetes with a custom VXLAN port -|Google Cloud Platform (GCP) +|{gcp-full} |Hybrid networking with OVN-Kubernetes |Nutanix