diff --git a/.s2i/httpd-cfg/01-commercial.conf b/.s2i/httpd-cfg/01-commercial.conf index 63737951fdbc..7e2fadf0799e 100644 --- a/.s2i/httpd-cfg/01-commercial.conf +++ b/.s2i/httpd-cfg/01-commercial.conf @@ -295,22 +295,22 @@ AddType text/vtt vtt RewriteRule ^container-platform/(4\.9|4\.10|4\.11|4\.12)/serverless/develop/serverless-traffic-management.html /container-platform/$1/serverless/knative-serving/traffic-splitting/traffic-splitting-overview.html [NE,R=302] - # redirect latest to 1.32 + # redirect latest to 1.33 RewriteRule ^serverless/?$ /serverless/latest [R=302] - RewriteRule ^serverless/latest/?(.*)$ /serverless/1\.32/$1 [NE,R=302] + RewriteRule ^serverless/latest/?(.*)$ /serverless/1\.33/$1 [NE,R=302] # redirect top-level without filespec to the about file - RewriteRule ^serverless/(1\.28|1\.29|1\.30|1\.31|1\.32)/?$ /serverless/$1/about/about-serverless.html [L,R=302] + RewriteRule ^serverless/(1\.28|1\.29|1\.30|1\.31|1\.32|1\.33)/?$ /serverless/$1/about/about-serverless.html [L,R=302] # redirect rel notes - RewriteRule ^container-platform/(4\.10|4\.11|4\.12|4\.13|4\.14)/serverless/serverless-release-notes.html$ /serverless/1.32/about/serverless-release-notes.html [L,R=302] - RewriteRule ^(rosa|dedicated)/serverless/serverless-release-notes.html$ /serverless/1.32/about/serverless-release-notes.html [L,R=302] + RewriteRule ^container-platform/(4\.10|4\.11|4\.12|4\.13|4\.14)/serverless/serverless-release-notes.html$ /serverless/1.33/about/serverless-release-notes.html [L,R=302] + RewriteRule ^(rosa|dedicated)/serverless/serverless-release-notes.html$ /serverless/1.33/about/serverless-release-notes.html [L,R=302] # redirect any links to existing OCP embedded content to standalone equivalent - RewriteRule ^container-platform/(4\.10|4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/serverless/?(.*)$ /serverless/1.32/$2 [L,R=302] + RewriteRule ^container-platform/(4\.10|4\.11|4\.12|4\.13|4\.14|4\.15|4\.16|4\.17|4\.18)/serverless/?(.*)$ /serverless/1.33/$2 [L,R=302] # redirect any links to existing ROSA/Dedicated embedded content to standalone equivalent - RewriteRule ^(rosa|dedicated)/serverless/?(.*)$ /serverless/1.32/$2 [L,R=302] + RewriteRule ^(rosa|dedicated)/serverless/?(.*)$ /serverless/1.33/$2 [L,R=302] # redirect builds latest to 1.0 RewriteRule ^builds/?$ /builds/latest [R=302] @@ -358,7 +358,7 @@ AddType text/vtt vtt # Pipelines handling unversioned and latest links RewriteRule ^pipelines/?$ /pipelines/latest [R=302] - RewriteRule ^pipelines/latest/?(.*)$ /pipelines/1\.14/$1 [NE,R=302] + RewriteRule ^pipelines/latest/?(.*)$ /pipelines/1\.15/$1 [NE,R=302] # Pipelines landing page @@ -386,7 +386,7 @@ AddType text/vtt vtt RewriteRule ^container-platform/(4\.10|4\.11|4\.12|4\.13|4\.14)/cicd/pipelines/securing-webhooks-with-event-listeners.html /pipelines/latest/secure/securing-webhooks-with-event-listeners.html [L,R=302] # redirect top-level without filespec to the about file - RewriteRule ^pipelines/(1\.10|1\.11|1\.12|1\.13|1\.14)/?$ /pipelines/$1/about/understanding-openshift-pipelines.html [L,R=302] + RewriteRule ^pipelines/(1\.10|1\.11|1\.12|1\.13|1\.14|1\.15|1\.16|1\.17|1\.18)/?$ /pipelines/$1/about/understanding-openshift-pipelines.html [L,R=302] # OSD redirects for new content @@ -446,6 +446,9 @@ AddType text/vtt vtt # ROSA UI short-term redirect https://issues.redhat.com/browse/OSDOCS-3511 RewriteRule rosa/post_installation_configuration/machine-configuration-tasks.html 
rosa/rosa_getting_started/rosa-getting-started.html#rosa-getting-started-configure-an-idp-and-grant-access_rosa-getting-started [NE,R=301] + # ROSA outdated log forwarding tutorial redirect https://issues.redhat.com/browse/OSDOCS-10983 + RewriteRule rosa/cloud_experts_tutorials/cloud-experts-rosa-cloudwatch-sts.html rosa/observability/logging/log_collection_forwarding/configuring-log-forwarding.html#rosa-cluster-logging-collector-log-forward-sts-cloudwatch_configuring-log-forwarding [NE,R=301] + # ACS welcome page redirect to StackRox docs # RewriteRule ^acs/?(.*)$ https://help.stackrox.com/ [NE,R=301] @@ -502,15 +505,15 @@ AddType text/vtt vtt RewriteRule ^container-platform/(4\.4|4\.5)/authentication/(allowing-javascript-access-api-server|encrypting-etcd|certificate-types-descriptions)\.html$ /container-platform/$1/security/$2\.html [NE,R=301] - # The following rule prevents an infinite redirect loop when browsing to /container-platform/4.15/virt/about_virt/about-virt.html - # RewriteRule ^container-platform/4\.15/virt/about_virt/about-virt.html$ - [L] + # The following rule prevents an infinite redirect loop when browsing to /container-platform/4.16/virt/about_virt/about-virt.html + RewriteRule ^container-platform/4\.16/virt/about_virt/about-virt.html$ - [L] # OpenShift Virtualization (CNV) catchall redirect; use when CNV releases asynchronously from OCP. Do not change the 302 to a 301. # When uncommented, this redirects all `virt` directory traffic to the about-virt page. # Pay mind to the redirect directly above this which prevents redirect loops. # To activate the redirects, uncomment the next and previous lines and update the version number to the pending release. - # RewriteRule container-platform/4\.15/virt/(?!about-virt\.html)(.+)$ /container-platform/4.15/virt/about_virt/about-virt.html [NE,R=302] + RewriteRule container-platform/4\.16/virt/(?!about-virt\.html)(.+)$ /container-platform/4.16/virt/about_virt/about-virt.html [NE,R=302] # Red Hat OpenShift support for Windows Containers (WMCO) catchall redirect; use when WMCO releases asynchronously from OCP. Do not change the 302 to a 301. diff --git a/.s2i/httpd-cfg/01-community.conf b/.s2i/httpd-cfg/01-community.conf index 67d21aa3f38e..209f6f561549 100644 --- a/.s2i/httpd-cfg/01-community.conf +++ b/.s2i/httpd-cfg/01-community.conf @@ -159,14 +159,14 @@ AddType text/vtt vtt RewriteRule ^latest/install_config/upgrades\.html(.*)$ /latest/install_config/upgrading/index.html$1 [NE,R=301] RewriteRule ^latest/install_config/upgrading/(.*)$ /latest/upgrading/$1 [NE,R=301] - # The following rule prevents an infinite redirect loop when browsing to /(latest|4\.15)/virt/about_virt/about-virt.html - # RewriteRule ^(latest|4\.15)/virt/about_virt/about-virt.html$ - [L] + # The following rule prevents an infinite redirect loop when browsing to /(latest|4\.16)/virt/about_virt/about-virt.html + RewriteRule ^(latest|4\.16)/virt/about_virt/about-virt.html$ - [L] # OpenShift Virtualization (CNV) catchall redirect; use when CNV releases asynchronously from OCP. Do not change the 302 to a 301. # When uncommented, this redirects all `virt` directory traffic to the about-virt page. # Pay mind to the redirect directly above this which prevents redirect loops. # To activate the redirects, uncomment the next and previous lines and update the version number to the pending release. 
- # RewriteRule ^(latest|4\.15)/virt/(?!about-virt\.html)(.+)$ /$1/virt/about_virt/about-virt.html [NE,R=302] + RewriteRule ^(latest|4\.16)/virt/(?!about-virt\.html)(.+)$ /$1/virt/about_virt/about-virt.html [NE,R=302] # Red Hat OpenShift support for Windows Containers (WMCO) catchall redirect; use when WMCO releases asynchronously from OCP. Do not change the 302 to a 301. # When uncommented, this redirects all `windows_containers` directory traffic to the /windows_containers/index.html page. diff --git a/.vale/styles/config/vocabularies/OpenShiftDocs/accept.txt b/.vale/styles/config/vocabularies/OpenShiftDocs/accept.txt index d803ff4a5906..16c5f5e09ace 100644 --- a/.vale/styles/config/vocabularies/OpenShiftDocs/accept.txt +++ b/.vale/styles/config/vocabularies/OpenShiftDocs/accept.txt @@ -1,11 +1,13 @@ # Regex terms added to accept.txt are ignored by the Vale linter and override RedHat Vale rules. # Add terms that have a corresponding incorrectly capitalized form to reject.txt. [Ff]ronthaul +[Mm][Bb]ps [Mm]idhaul [Pp]assthrough [Pp]ostinstall [Pp]recaching [Pp]reinstall +[Oo]n-premise [Rr]ealtime [Tt]elco Assisted Installer @@ -17,7 +19,6 @@ gpspipe hyperthreads? KPIs? linuxptp -[Mm][Bb]ps Mellanox MetalLB NICs? diff --git a/_attributes/attributes-openshift-dedicated.adoc b/_attributes/attributes-openshift-dedicated.adoc index 4cce2014c835..409b9f96fac4 100644 --- a/_attributes/attributes-openshift-dedicated.adoc +++ b/_attributes/attributes-openshift-dedicated.adoc @@ -50,4 +50,6 @@ :hcp: hosted control planes :hcp-title: ROSA with HCP :hcp-title-first: {product-title} (ROSA) with {hcp} (HCP) +:rosa-classic: ROSA (classic architecture) +:rosa-classic-first: {product-title} (ROSA) (classic architecture) //ROSA CLI variables diff --git a/_attributes/common-attributes.adoc b/_attributes/common-attributes.adoc index afc1acaf3408..61c10ba22034 100644 --- a/_attributes/common-attributes.adoc +++ b/_attributes/common-attributes.adoc @@ -46,7 +46,7 @@ endif::[] :rh-storage: OpenShift Data Foundation :rh-rhacm-first: Red Hat Advanced Cluster Management (RHACM) :rh-rhacm: RHACM -:rh-rhacm-version: 2.9 +:rh-rhacm-version: 2.10 :sandboxed-containers-first: OpenShift sandboxed containers :sandboxed-containers-operator: OpenShift sandboxed containers Operator :sandboxed-containers-version: 1.5 @@ -164,7 +164,7 @@ endif::[] :product-rosa: Red Hat OpenShift Service on AWS :SMProductName: Red Hat OpenShift Service Mesh :SMProductShortName: Service Mesh -:SMProductVersion: 2.5.1 +:SMProductVersion: 2.5.2 :MaistraVersion: 2.5 :KialiProduct: Kiali Operator provided by Red Hat :SMPlugin: OpenShift Service Mesh Console (OSSMC) plugin @@ -203,9 +203,18 @@ endif::[] :osdk_ver: 1.31.0 //Operator SDK version that shipped with the previous OCP 4.x release :osdk_ver_n1: 1.28.0 +//Version-agnostic OLM +:olm-first: Operator Lifecycle Manager (OLM) +:olm: OLM +//Initial version of OLM that shipped with OCP 4, aka "v0" +:olmv0: legacy OLM +:olmv0-caps: Legacy OLM +:olmv0-first: legacy Operator Lifecycle Manager (OLM) +:olmv0-first-caps: Legacy Operator Lifecycle Manager (OLM) //Next-gen (OCP 4.14+) Operator Lifecycle Manager, aka "v1" :olmv1: OLM 1.0 :olmv1-first: Operator Lifecycle Manager (OLM) 1.0 +// :ztp-first: GitOps Zero Touch Provisioning (ZTP) :ztp: GitOps ZTP :3no: three-node OpenShift @@ -303,7 +312,9 @@ endif::openshift-origin[] :entra-short: Workload ID -// Cluster API Providers +// Cluster API terminology +// Cluster CAPI Operator +:cluster-capi-operator: Cluster CAPI Operator // Cluster API Provider 
Amazon Web Services (AWS) :cap-aws-first: Cluster API Provider Amazon Web Services (AWS) :cap-aws-short: Cluster API Provider AWS diff --git a/_distro_map.yml b/_distro_map.yml index 08ed33ae2159..02927e3af1b7 100644 --- a/_distro_map.yml +++ b/_distro_map.yml @@ -187,6 +187,19 @@ openshift-rosa: rosa-preview: name: '' dir: rosa-preview/ +openshift-rosa-hcp: + name: Red Hat OpenShift Service on AWS + author: OpenShift Documentation Project + site: commercial + site_name: Documentation + site_url: https://docs.openshift.com/ + branches: + enterprise-4.15: + name: '' + dir: rosa-hcp/ + rosa-preview: + name: '' + dir: rosa-hcp-preview/ openshift-rosa-portal: name: Red Hat OpenShift Service on AWS author: OpenShift Documentation Project @@ -326,6 +339,9 @@ openshift-serverless: serverless-docs-1.32: name: '1.32' dir: serverless/1.32 + serverless-docs-1.33: + name: '1.33' + dir: serverless/1.33 openshift-gitops: name: Red Hat OpenShift GitOps author: OpenShift documentation team @@ -386,3 +402,13 @@ openshift-builds: build-docs-1.0: name: '1.0' dir: builds/1.0 +openshift-lightspeed: + name: Red Hat OpenShift Lightspeed + author: OpenShift documentation team + site: commercial + site_name: Documentation + site_url: https://docs.openshift.com/ + branches: + lightspeed-docs-1.0tp1: + name: '1.0tp1' + dir: lightspeed/1.0tp1 diff --git a/_templates/_page_openshift.html.erb b/_templates/_page_openshift.html.erb index 4cae4062563f..cb7a1482455e 100644 --- a/_templates/_page_openshift.html.erb +++ b/_templates/_page_openshift.html.erb @@ -43,7 +43,7 @@ <% unsupported_versions = ["3.0", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.9", "3.10", "4.1", "4.2", "4.3", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9", "4.10", "4.11"]; - unsupported_versions_acs = ["3.65", "3.66", "3.67", "3.68", "3.69", "3.70", "3.71", "3.72", "3.73", "3.74", "4.0", "4.1", "4.2"]; + unsupported_versions_acs = ["3.65", "3.66", "3.67", "3.68", "3.69", "3.70", "3.71", "3.72", "3.73", "3.74", "4.0", "4.1", "4.2", "4.3"]; unsupported_versions_serverless = []; @@ -67,7 +67,7 @@ <% end %> - <% if (version == "4.16") && (distro_key != "openshift-webscale" && distro_key != "openshift-dpu") %> + <% if (version == "4.16") && (distro_key != "openshift-webscale" && distro_key != "openshift-dpu" && distro_key != "rosa-hcp") %> +
+ <!-- New product card: heading "OpenShift Lightspeed", link text "Lightspeed", description "OpenShift Lightspeed documentation." (surrounding tag markup not recoverable) -->
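For context on the `openshift-lightspeed` entry added to `_distro_map.yml` above, this is the general shape such an entry takes — a minimal sketch using a hypothetical `example-product` distro (every name, branch, and directory here is a placeholder, not part of this change):

[source,yaml]
----
example-product:                    # distro key checked elsewhere via distro_key
  name: Example Product             # product name shown on the docs site
  author: OpenShift documentation team
  site: commercial                  # publish to the commercial site
  site_name: Documentation
  site_url: https://docs.openshift.com/
  branches:
    example-docs-1.0:               # git branch to build from
      name: '1.0'                   # version label shown in the version picker
      dir: example/1.0              # output directory under the site root
----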
diff --git a/security/certificate_types_descriptions/etcd-certificates.adoc b/security/certificate_types_descriptions/etcd-certificates.adoc index 268ed00dd495..d97fe43d38aa 100644 --- a/security/certificate_types_descriptions/etcd-certificates.adoc +++ b/security/certificate_types_descriptions/etcd-certificates.adoc @@ -14,13 +14,20 @@ etcd certificates are signed by the etcd-signer; they come from a certificate au The CA certificates are valid for 10 years. The peer, client, and server certificates are valid for three years. +include::modules/rotating-certificate-authority.adoc[leveloffset=+1] +include::modules/etcd-cert-alerts-metrics-signer.adoc[leveloffset=+1] + +.Additional resources + +* xref:../../security/certificate_types_descriptions/etcd-certificates.adoc#rotating-certificate-authority_cert-types-etcd-certificates[Rotating the etcd certificate] + == Management These certificates are only managed by the system and are automatically rotated. == Services -etcd certificates are used for encrypted communication between etcd member peers, as well as encrypted client traffic. The following certificates are generated and used by etcd and other processes that communicate with etcd: +etcd certificates are used for encrypted communication between etcd member peers and encrypted client traffic. The following certificates are generated and used by etcd and other processes that communicate with etcd: * Peer certificates: Used for communication between etcd members. * Client certificates: Used for encrypted server-client communication. Client certificates are currently used by the API server only, and no other service should connect to etcd directly except for the proxy. Client secrets (`etcd-client`, `etcd-metric-client`, `etcd-metric-signer`, and `etcd-signer`) are added to the `openshift-config`, `openshift-monitoring`, and `openshift-kube-apiserver` namespaces. diff --git a/security/certificate_types_descriptions/machine-config-operator-certificates.adoc b/security/certificate_types_descriptions/machine-config-operator-certificates.adoc index 6ec727423c6a..70e9a69890e9 100644 --- a/security/certificate_types_descriptions/machine-config-operator-certificates.adoc +++ b/security/certificate_types_descriptions/machine-config-operator-certificates.adoc @@ -25,7 +25,7 @@ include::snippets/mcs-endpoint-limitation.adoc[] .Additional resources -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#understanding-the-machine-config-operator[Understanding the Machine Config Operator]. +* xref:../../machine_configuration/index.adoc#machine-config-operator_machine-config-overview[Machine Config Operator]. * xref:../../networking/openshift_sdn/about-openshift-sdn.adoc#about-openshift-sdn[About the OpenShift SDN network plugin]. diff --git a/security/compliance_operator/co-management/compliance-operator-installation.adoc b/security/compliance_operator/co-management/compliance-operator-installation.adoc index 97c53b9e46ee..f3bb95957219 100644 --- a/security/compliance_operator/co-management/compliance-operator-installation.adoc +++ b/security/compliance_operator/co-management/compliance-operator-installation.adoc @@ -10,7 +10,7 @@ Before you can use the Compliance Operator, you must ensure it is deployed in th [IMPORTANT] ==== -The Compliance Operator might report incorrect results on managed platforms, such as OpenShift Dedicated, Red Hat OpenShift Service on AWS, and Microsoft Azure Red Hat OpenShift. 
For more information, see the link:https://access.redhat.com/solutions/6983418[Red Hat Knowledgebase Solution #6983418]. +The Compliance Operator might report incorrect results on managed platforms, such as OpenShift Dedicated, Red{nbsp}Hat OpenShift Service on AWS Classic, and Microsoft Azure Red{nbsp}Hat OpenShift. For more information, see the Knowledgebase article link:https://access.redhat.com/solutions/6983418[Compliance Operator reports incorrect results on Managed Services]. ==== include::modules/compliance-operator-console-installation.adoc[leveloffset=+1] @@ -24,6 +24,8 @@ You can create a custom SCC for the Compliance Operator scanner pod service acco include::modules/compliance-operator-cli-installation.adoc[leveloffset=+1] +include::modules/compliance-operator-rosa-installation.adoc[leveloffset=+1] + [IMPORTANT] ==== If the `restricted` Security Context Constraints (SCC) have been modified to contain the `system:authenticated` group or has added `requiredDropCapabilities`, the Compliance Operator may not function properly due to permissions issues. diff --git a/security/compliance_operator/co-scans/compliance-operator-supported-profiles.adoc b/security/compliance_operator/co-scans/compliance-operator-supported-profiles.adoc index 710ff5b44088..07601787871f 100644 --- a/security/compliance_operator/co-scans/compliance-operator-supported-profiles.adoc +++ b/security/compliance_operator/co-scans/compliance-operator-supported-profiles.adoc @@ -18,7 +18,7 @@ authorized auditor to achieve compliance with a standard. [IMPORTANT] ==== -The Compliance Operator might report incorrect results on managed platforms, such as OpenShift Dedicated, Red Hat OpenShift Service on AWS, and Azure Red Hat OpenShift. For more information, see the link:https://access.redhat.com/solutions/6983418[Red Hat Knowledgebase Solution #6983418]. +The Compliance Operator might report incorrect results on some managed platforms, such as OpenShift Dedicated and Azure Red Hat OpenShift. For more information, see the link:https://access.redhat.com/solutions/6983418[Red Hat Knowledgebase Solution #6983418]. ==== include::modules/compliance-supported-profiles.adoc[leveloffset=+1] diff --git a/security/compliance_operator/compliance-operator-release-notes.adoc b/security/compliance_operator/compliance-operator-release-notes.adoc index a1e6e5f02acd..7cfd9e9561a5 100644 --- a/security/compliance_operator/compliance-operator-release-notes.adoc +++ b/security/compliance_operator/compliance-operator-release-notes.adoc @@ -15,6 +15,29 @@ For an overview of the Compliance Operator, see xref:../../security/compliance_o To access the latest release, see xref:../../security/compliance_operator/co-management/compliance-operator-updating.adoc#olm-preparing-upgrade_compliance-operator-updating[Updating the Compliance Operator]. +[id="compliance-operator-release-notes-1-5-0_{context}"] +== OpenShift Compliance Operator 1.5.0 + +The following advisory is available for the OpenShift Compliance Operator 1.5.0: + +* link:https://access.redhat.com/errata/RHBA-2024:3533[RHBA-2024:3533 - OpenShift Compliance Operator 1.5.0 bug fix and enhancement update] + +[id="compliance-operator-1-5-0-new-features-and-enhancements_{context}"] +=== New features and enhancements + +* With this update, the Compliance Operator provides a unique profile ID for easier programmatic use. (link:https://issues.redhat.com/browse/CMP-2450[*CMP-2450*]) + +* With this release, the Compliance Operator is now tested and supported on the ROSA HCP environment. 
The Compliance Operator loads only Node profiles when running on ROSA HCP. This is because a Red{nbsp}Hat managed platform restricts access to the control plane, which makes Platform profiles irrelevant to the operator's function. (link:https://issues.redhat.com/browse/CMP-2581[*CMP-2581*]) + +[id="compliance-operator-1-5-0-bug-fixes_{context}"] +=== Bug fixes + +* CVE-2024-2961 is resolved in the Compliance Operator 1.5.0 release. (link:https://access.redhat.com/security/cve/CVE-2024-2961[*CVE-2024-2961*]) + +* Previously, for ROSA HCP systems, profile listings were incorrect. This update allows the Compliance Operator to provide correct profile output. (link:https://issues.redhat.com/browse/OCPBUGS-34535[*OCPBUGS-34535*]) + +* With this release, namespaces can be excluded from the `ocp4-configure-network-policies-namespaces` check by setting the `ocp4-var-network-policies-namespaces-exempt-regex` variable in the tailored profile. (link:https://issues.redhat.com/browse/CMP-2543[*CMP-2543*]) + [id="compliance-operator-release-notes-1-4-1"] == OpenShift Compliance Operator 1.4.1 @@ -108,10 +131,9 @@ This update addresses a CVE in an underlying dependency. * You can install and use the Compliance Operator in an {product-title} cluster running in FIPS mode. + -[IMPORTANT] -==== -To enable FIPS mode for your cluster, you must run the installation program from a {op-system-base} computer configured to operate in FIPS mode. For more information about configuring FIPS mode on RHEL, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/security_hardening/assembly_installing-the-system-in-fips-mode_security-hardening[Installing the system in FIPS mode]. -==== +-- +include::snippets/fips-snippet.adoc[] +-- [id="compliance-operator-1-3-1-known-issue"] === Known issue diff --git a/security/container_security/security-container-signature.adoc b/security/container_security/security-container-signature.adoc index d5f9937e864c..7556bf20be2f 100644 --- a/security/container_security/security-container-signature.adoc +++ b/security/container_security/security-container-signature.adoc @@ -33,4 +33,4 @@ include::modules/containers-signature-verify-skopeo.adoc[leveloffset=+2] [id="additional-resources_security-container-signature"] [role="_additional-resources"] == Additional resources -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-overview-post-install-machine-configuration-tasks[Machine Config Overview] +* xref:../../machine_configuration/index.adoc#machine-config-overview[Machine Config Overview] diff --git a/security/container_security/security-network.adoc b/security/container_security/security-network.adoc index 4c786c049989..1470c3b846e1 100644 --- a/security/container_security/security-network.adoc +++ b/security/container_security/security-network.adoc @@ -21,7 +21,7 @@ include::modules/security-network-policies.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../networking/openshift_network_security/network_policy/about-network-policy.adoc#about-network-policy[About network policy] +* xref:../../networking/network_security/network_policy/about-network-policy.adoc#about-network-policy[About network policy] // Multiple pod networks include::modules/security-network-multiple-pod.adoc[leveloffset=+1] diff --git a/security/file_integrity_operator/file-integrity-operator-release-notes.adoc b/security/file_integrity_operator/file-integrity-operator-release-notes.adoc index
fd4019ae59f9..c84594829d4f 100644 --- a/security/file_integrity_operator/file-integrity-operator-release-notes.adoc +++ b/security/file_integrity_operator/file-integrity-operator-release-notes.adoc @@ -15,6 +15,18 @@ For an overview of the File Integrity Operator, see xref:../../security/file_int To access the latest release, see xref:../../security/file_integrity_operator/file-integrity-operator-updating.adoc#olm-preparing-upgrade_file-integrity-operator-updating[Updating the File Integrity Operator]. +[id="file-integrity-operator-release-notes-1-3-4"] +== OpenShift File Integrity Operator 1.3.4 + +The following advisory is available for the OpenShift File Integrity Operator 1.3.4: + +* link:https://access.redhat.com/errata/RHBA-2024:2946[RHBA-2024:2946 OpenShift File Integrity Operator Bug Fix and Enhancement Update] + +[id="file-integrity-operator-1-3-4-bug-fixes"] +=== Bug fixes + +Previously, File Integrity Operator would issue a `NodeHasIntegrityFailure` alert due to multus certificate rotation. With this release, the alert and failing status are now correctly triggered. (link:https://issues.redhat.com/browse/OCPBUGS-31257[*OCPBUGS-31257*]) + [id="file-integrity-operator-release-notes-1-3-3"] == OpenShift File Integrity Operator 1.3.3 @@ -28,11 +40,10 @@ This update addresses a CVE in an underlying dependency. === New features and enhancements * You can install and use the File Integrity Operator in an {product-title} cluster running in FIPS mode. - -[IMPORTANT] -==== -To enable FIPS mode for your cluster, you must run the installation program from a {op-system-base} computer configured to operate in FIPS mode. For more information about configuring FIPS mode on RHEL, see (link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/security_hardening/assembly_installing-the-system-in-fips-mode_security-hardening[Installing the system in FIPS mode]) -==== ++ +-- +include::snippets/fips-snippet.adoc[] +-- [id="file-integrity-operator-1-3-3-bug-fixes"] === Bug fixes diff --git a/security/seccomp-profiles.adoc b/security/seccomp-profiles.adoc index 72f85eb5a516..1eca1b46bd36 100644 --- a/security/seccomp-profiles.adoc +++ b/security/seccomp-profiles.adoc @@ -52,4 +52,4 @@ The custom SCC must have the appropriate priority to be automatically assigned t [role="_additional-resources"] == Additional resources * xref:../authentication/managing-security-context-constraints.adoc[Managing security context constraints] -* xref:../post_installation_configuration/machine-configuration-tasks.adoc[Postinstallation machine configuration tasks] +* xref:../machine_configuration/index.adoc#machine-config-overview[Machine Config Overview] diff --git a/service_mesh/v2x/ossm-traffic-manage.adoc b/service_mesh/v2x/ossm-traffic-manage.adoc index c3847515570b..e360bccafea9 100644 --- a/service_mesh/v2x/ossm-traffic-manage.adoc +++ b/service_mesh/v2x/ossm-traffic-manage.adoc @@ -19,6 +19,13 @@ endif::openshift-rosa,openshift-dedicated[] include::modules/ossm-routing-ingress.adoc[leveloffset=+2] +ifdef::openshift-enterprise[] +[role="_additional-resources"] +.Additional resources + +* xref:../../networking/configuring-node-port-service-range.adoc#configuring-node-port-service-range[Configuring the node port service range] +endif::[] + include::modules/ossm-routing-gateways.adoc[leveloffset=+2] [id="ossm-auto-route_{context}"] diff --git a/snippets/about-multiarch-tuning-operator.adoc b/snippets/about-multiarch-tuning-operator.adoc new file mode 100644 index 000000000000..b196ca636b53 --- 
/dev/null +++ b/snippets/about-multiarch-tuning-operator.adoc @@ -0,0 +1,12 @@ +// Snippet included in the following modules + +// * post_installation_configuration/cluster-tasks.adoc/multi-architecture-modify-machine-set-aws.adoc +// * post_installation_configuration/cluster-tasks.adoc/multi-architecture-modify-machine-set-gcp.adoc +// * post_installation_configuration/cluster-tasks.adoc/multi-architecture-modify-machine-set.adoc + +:_mod-docs-content-type: SNIPPET + +[NOTE] +==== +Before adding a secondary architecture node to your cluster, it is recommended to install the Multiarch Tuning Operator, and deploy a `ClusterPodPlacementConfig` custom resource. For more information, see "Managing workloads on multi-architecture clusters by using the Multiarch Tuning Operator". +==== diff --git a/snippets/capabilities-table.adoc b/snippets/capabilities-table.adoc index 502228066832..b9bfa78fc74f 100644 --- a/snippets/capabilities-table.adoc +++ b/snippets/capabilities-table.adoc @@ -25,6 +25,9 @@ The following table describes the `baselineCapabilitySet` values. |`v4.15` |Specify this option when you want to enable the default capabilities for {product-title} 4.15. By specifying `v4.15`, capabilities that are introduced in newer versions of {product-title} are not enabled. The default capabilities in {product-title} 4.15 are `baremetal`, `MachineAPI`, `marketplace`, `OperatorLifecycleManager`, `openshift-samples`, `Console`, `Insights`, `Storage`, `CSISnapshot`, `NodeTuning`, `ImageRegistry`, `Build`, `CloudCredential`, and `DeploymentConfig`. +|`v4.16` +|Specify this option when you want to enable the default capabilities for {product-title} 4.16. By specifying `v4.16`, capabilities that are introduced in newer versions of {product-title} are not enabled. The default capabilities in {product-title} 4.16 are `baremetal`, `MachineAPI`, `marketplace`, `OperatorLifecycleManager`, `openshift-samples`, `Console`, `Insights`, `Storage`, `CSISnapshot`, `NodeTuning`, `ImageRegistry`, `Build`, `CloudCredential`, `DeploymentConfig`, and `CloudControllerManager`. + |`None` |Specify when the other sets are too large, and you do not need any capabilities or want to fine-tune via `additionalEnabledCapabilities`. 
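To show where `baselineCapabilitySet` is consumed, here is an abbreviated sketch of the relevant stanza in an `install-config.yaml` (the domain and cluster name are placeholders, and a real install-config needs further fields such as the platform stanza and pull secret):

[source,yaml]
----
apiVersion: v1
baseDomain: example.com             # placeholder
metadata:
  name: example-cluster             # placeholder
capabilities:
  baselineCapabilitySet: v4.16      # enables the 4.16 default capability set from the table above
  additionalEnabledCapabilities:    # optional extras on top of the baseline, or paired with `None`
  - marketplace
----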
diff --git a/snippets/custom-dns-server.adoc b/snippets/custom-dns-server.adoc index dbf46f6ff394..273de3e758f4 100644 --- a/snippets/custom-dns-server.adoc +++ b/snippets/custom-dns-server.adoc @@ -3,7 +3,6 @@ // * modules/installation-custom-aws-vpc.adoc // * modules/installation-about-custom-azure-vnet.adoc // * modules/installation-custom-gcp-vpc.adoc -// * modules/installation-custom-alibaba-vpc.adoc // * modules/installation-ibm-power-vs.adoc :_mod-docs-content-type: SNIPPET diff --git a/snippets/distr-tracing-tempo-required-secret-parameters.adoc b/snippets/distr-tracing-tempo-required-secret-parameters.adoc index dd3712b459a5..2656b961d5bc 100644 --- a/snippets/distr-tracing-tempo-required-secret-parameters.adoc +++ b/snippets/distr-tracing-tempo-required-secret-parameters.adoc @@ -5,6 +5,7 @@ :_mod-docs-content-type: SNIPPET +[id="required_secret_parameters_{context}"] .Required secret parameters [cols="25h,~"] |=== @@ -12,7 +13,7 @@ //source: https://github.com/grafana/tempo-operator/blob/main/docs/tempostack/object_storage.md -|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/[Red Hat OpenShift Data Foundation] +|link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/[{odf-full}] | `name: tempostack-dev-odf # example` diff --git a/snippets/distr-tracing-tempo-tempomonolithic-custom-resource.adoc b/snippets/distr-tracing-tempo-tempomonolithic-custom-resource.adoc new file mode 100644 index 000000000000..6fd6b0216318 --- /dev/null +++ b/snippets/distr-tracing-tempo-tempomonolithic-custom-resource.adoc @@ -0,0 +1,32 @@ +// :_mod-docs-content-type: SNIPPET +// Text snippet included in the following modules: +// +// * modules/distr-tracing-tempo-install-tempomonolithic-web-console.adoc +// * modules/distr-tracing-tempo-install-tempomonolithic-cli.adoc +The following `TempoMonolithic` CR creates a TempoMonolithic deployment with trace ingestion over OTLP/gRPC and OTLP/HTTP, storing traces in a supported type of storage and exposing Jaeger UI via a route: ++ +[source,yaml] +---- +apiVersion: tempo.grafana.com/v1alpha1 +kind: TempoMonolithic +metadata: + name: + namespace: +spec: + storage: + traces: + backend: # <1> + size: Gi # <2> + s3: # <3> + secret: # <4> + jaegerui: + enabled: true # <5> + route: + enabled: true # <6> +---- +<1> Type of storage for storing traces: in-memory storage, a persistent volume, or object storage. The value for the `tmpfs` in-memory storage is `memory`. The value for a persistent volume is `pv`. The accepted values for object storage are `s3`, `gcs`, or `azure`, depending on the used object store type. +<2> Memory size: For in-memory storage, this means the size of the `tmpfs` volume, where the default is `2Gi`. For a persistent volume, this means the size of the persistent volume claim, where the default is `10Gi`. For object storage, this means the size of the persistent volume claim for the Tempo WAL, where the default is `10Gi`. +<3> Optional: For object storage, the type of object storage. The accepted values are `s3`, `gcs`, and `azure`, depending on the used object store type. +<4> Optional: For object storage, the value of the `name` in the `metadata` of the storage secret. The storage secret must be in the same namespace as the TempoMonolithic instance and contain the fields specified in "Table 1. Required secret parameters" in the section "Object storage setup". +<5> Enables the Jaeger UI. +<6> Enables creation of a route for the Jaeger UI. 
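As a concrete reading of the callouts above, the following filled-in variant uses `tmpfs` in-memory storage, so no storage secret is needed (the name and namespace are placeholder values):

[source,yaml]
----
apiVersion: tempo.grafana.com/v1alpha1
kind: TempoMonolithic
metadata:
  name: sample                      # placeholder
  namespace: openshift-tempo        # placeholder
spec:
  storage:
    traces:
      backend: memory               # tmpfs in-memory storage (callout 1)
      size: 2Gi                     # size of the tmpfs volume; 2Gi is the documented default (callout 2)
  jaegerui:
    enabled: true                   # enables the Jaeger UI (callout 5)
    route:
      enabled: true                 # exposes the Jaeger UI through a route (callout 6)
----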
diff --git a/snippets/fips-snippet.adoc b/snippets/fips-snippet.adoc new file mode 100644 index 000000000000..c92166bde1e0 --- /dev/null +++ b/snippets/fips-snippet.adoc @@ -0,0 +1,34 @@ +// Text snippet included in the following modules: +// +// * modules/agent-installer-fips-compliance.adoc +// * modules/installation-aws-config-yaml.adoc +// * modules/installation-aws-config-yaml.adoc +// * modules/installation-azure-config-yaml.adoc +// * modules/installation-azure-config-yaml.adoc +// * modules/installation-azure-config-yaml.adoc +// * modules/installation-azure-config-yaml.adoc +// * modules/installation-azure-stack-hub-config-yaml.adoc +// * modules/installation-bare-metal-config-yaml.adoc +// * modules/installation-configuration-parameters.adoc +// * modules/installation-gcp-config-yaml.adoc +// * modules/installation-gcp-user-infra-shared-vpc-config-yaml.adoc +// * modules/installation-ibm-cloud-config-yaml.adoc +// * modules/installation-vsphere-config-yaml.adoc +// * modules/machine-config-overview.adoc +// * modules/rhel-compute-requirements.adoc +// * modules/rosa-sts-interactive-cluster-creation-mode-options.adoc +// * modules/security-compliance-nist.adoc +// +// Text snippet included in the following assemblies: +// +// * security/compliance_operator/compliance-operator-release-notes.adoc +// * security/file_integrity_operator/file-integrity-operator-release-notes.adoc + +:_mod-docs-content-type: SNIPPET + +[IMPORTANT] +==== +To enable FIPS mode for your cluster, you must run the installation program from a {op-system-base-full} computer configured to operate in FIPS mode. For more information about configuring FIPS mode on RHEL, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/security_hardening/assembly_installing-the-system-in-fips-mode_security-hardening[Installing the system in FIPS mode]. + +When running {op-system-base-full} or {op-system-first} booted in FIPS mode, {product-title} core components use the {op-system-base} cryptographic libraries that have been submitted to NIST for FIPS 140-2/140-3 Validation on only the x86_64, ppc64le, and s390x architectures. +==== diff --git a/snippets/ibu-ApplicationBackupRestoreLso.adoc b/snippets/ibu-ApplicationBackupRestoreLso.adoc new file mode 100644 index 000000000000..17b5a67176ac --- /dev/null +++ b/snippets/ibu-ApplicationBackupRestoreLso.adoc @@ -0,0 +1,40 @@ +[source,yaml] +---- +apiVersion: velero.io/v1 +kind: Backup +metadata: + labels: + velero.io/storage-location: default + name: backup-app + namespace: openshift-adp +spec: + includedNamespaces: + - test + includedNamespaceScopedResources: + - secrets + - persistentvolumeclaims + - deployments + - statefulsets + - configmaps + - cronjobs + - services + - job + - poddisruptionbudgets + - <1> + excludedClusterScopedResources: + - persistentVolumes +--- +apiVersion: velero.io/v1 +kind: Restore +metadata: + name: test-app + namespace: openshift-adp + labels: + velero.io/storage-location: default + annotations: + lca.openshift.io/apply-wave: "4" +spec: + backupName: + backup-app +---- +<1> Define custom resources for your application. 
\ No newline at end of file diff --git a/snippets/ibu-ApplicationBackupRestoreLvms.adoc b/snippets/ibu-ApplicationBackupRestoreLvms.adoc new file mode 100644 index 000000000000..83338a6c3cf1 --- /dev/null +++ b/snippets/ibu-ApplicationBackupRestoreLvms.adoc @@ -0,0 +1,50 @@ +[source,yaml] +---- +apiVersion: velero.io/v1 +kind: Backup +metadata: + labels: + velero.io/storage-location: default + name: backup-app + namespace: openshift-adp +spec: + includedNamespaces: + - test + includedNamespaceScopedResources: + - secrets + - persistentvolumeclaims + - deployments + - statefulsets + - configmaps + - cronjobs + - services + - job + - poddisruptionbudgets + - <1> + includedClusterScopedResources: + - persistentVolumes <2> + - logicalvolumes.topolvm.io <3> + - volumesnapshotcontents <4> +--- +apiVersion: velero.io/v1 +kind: Restore +metadata: + name: test-app + namespace: openshift-adp + labels: + velero.io/storage-location: default + annotations: + lca.openshift.io/apply-wave: "4" +spec: + backupName: + backup-app + restorePVs: true + restoreStatus: + includedResources: + - logicalvolumes <5> +---- +<1> Define custom resources for your application. +<2> Required field. +<3> Required field +<4> Optional if you use {lvms} volume snapshots. +<5> Required field. \ No newline at end of file diff --git a/snippets/ibu-ApplicationClusterScopedBackupRestore.adoc b/snippets/ibu-ApplicationClusterScopedBackupRestore.adoc new file mode 100644 index 000000000000..e49d37e22234 --- /dev/null +++ b/snippets/ibu-ApplicationClusterScopedBackupRestore.adoc @@ -0,0 +1,35 @@ +[source,yaml] +---- +apiVersion: velero.io/v1 +kind: Backup +metadata: + annotations: + lca.openshift.io/apply-label: "apiextensions.k8s.io/v1/customresourcedefinitions/test.example.com,security.openshift.io/v1/securitycontextconstraints/test,rbac.authorization.k8s.io/v1/clusterroles/test-role,rbac.authorization.k8s.io/v1/clusterrolebindings/system:openshift:scc:test" <1> + name: backup-app-cluster-resources + labels: + velero.io/storage-location: default + namespace: openshift-adp +spec: + includedClusterScopedResources: + - customresourcedefinitions + - securitycontextconstraints + - clusterrolebindings + - clusterroles + excludedClusterScopedResources: + - Namespace +--- +apiVersion: velero.io/v1 +kind: Restore +metadata: + name: test-app-cluster-resources + namespace: openshift-adp + labels: + velero.io/storage-location: default + annotations: + lca.openshift.io/apply-wave: "3" <2> +spec: + backupName: + backup-app-cluster-resources +---- +<1> Replace the example resource name with your actual resources. +<2> The `lca.openshift.io/apply-wave` value must be higher than the value in the platform `Restore` CRs and lower than the value in the application namespace-scoped `Restore` CR. 
\ No newline at end of file diff --git a/snippets/ibu-PlatformBackupRestore.adoc b/snippets/ibu-PlatformBackupRestore.adoc new file mode 100644 index 000000000000..9ead412ae0e4 --- /dev/null +++ b/snippets/ibu-PlatformBackupRestore.adoc @@ -0,0 +1,39 @@ +[source,yaml] +---- +apiVersion: velero.io/v1 +kind: Backup +metadata: + name: acm-klusterlet + annotations: + lca.openshift.io/apply-label: "apps/v1/deployments/open-cluster-management-agent/klusterlet,v1/secrets/open-cluster-management-agent/bootstrap-hub-kubeconfig,rbac.authorization.k8s.io/v1/clusterroles/klusterlet,v1/serviceaccounts/open-cluster-management-agent/klusterlet,scheduling.k8s.io/v1/priorityclasses/klusterlet-critical,rbac.authorization.k8s.io/v1/clusterroles/open-cluster-management:klusterlet-admin-aggregate-clusterrole,rbac.authorization.k8s.io/v1/clusterrolebindings/klusterlet,operator.open-cluster-management.io/v1/klusterlets/klusterlet,apiextensions.k8s.io/v1/customresourcedefinitions/klusterlets.operator.open-cluster-management.io,v1/secrets/open-cluster-management-agent/open-cluster-management-image-pull-credentials" <1> + labels: + velero.io/storage-location: default + namespace: openshift-adp +spec: + includedNamespaces: + - open-cluster-management-agent + includedClusterScopedResources: + - klusterlets.operator.open-cluster-management.io + - clusterroles.rbac.authorization.k8s.io + - clusterrolebindings.rbac.authorization.k8s.io + - priorityclasses.scheduling.k8s.io + includedNamespaceScopedResources: + - deployments + - serviceaccounts + - secrets + excludedNamespaceScopedResources: [] +--- +apiVersion: velero.io/v1 +kind: Restore +metadata: + name: acm-klusterlet + namespace: openshift-adp + labels: + velero.io/storage-location: default + annotations: + lca.openshift.io/apply-wave: "1" +spec: + backupName: + acm-klusterlet +---- +<1> If your `multiclusterHub` CR does not have `.spec.imagePullSecret` defined and the secret does not exist on the `open-cluster-management-agent` namespace in your hub cluster, remove `v1/secrets/open-cluster-management-agent/open-cluster-management-image-pull-credentials`. \ No newline at end of file diff --git a/snippets/ibu-PlatformBackupRestoreLvms.adoc b/snippets/ibu-PlatformBackupRestoreLvms.adoc new file mode 100644 index 000000000000..51ba1ec5b17a --- /dev/null +++ b/snippets/ibu-PlatformBackupRestoreLvms.adoc @@ -0,0 +1,31 @@ +[source,yaml] +---- +apiVersion: velero.io/v1 +kind: Backup +metadata: + labels: + velero.io/storage-location: default + name: lvmcluster + namespace: openshift-adp +spec: + includedNamespaces: + - openshift-storage + includedNamespaceScopedResources: + - lvmclusters + - lvmvolumegroups + - lvmvolumegroupnodestatuses +--- +apiVersion: velero.io/v1 +kind: Restore +metadata: + name: lvmcluster + namespace: openshift-adp + labels: + velero.io/storage-location: default + annotations: + lca.openshift.io/apply-wave: "2" <1> +spec: + backupName: + lvmcluster +---- +<1> The `lca.openshift.io/apply-wave` value must be lower than the values specified in the application `Restore` CRs. \ No newline at end of file diff --git a/snippets/lvms-disconnected-ImageSetConfig.adoc b/snippets/lvms-disconnected-ImageSetConfig.adoc index 9975d16f91d3..5bd136967202 100644 --- a/snippets/lvms-disconnected-ImageSetConfig.adoc +++ b/snippets/lvms-disconnected-ImageSetConfig.adoc @@ -25,7 +25,7 @@ mirror: - name: registry.redhat.io/ubi9/ubi:latest <9> helm: {} ---- -<1> Set the the maximum size (in GiB) of each file within the image set. 
+<1> Set the maximum size (in GiB) of each file within the image set. <2> Specify the location in which you want to save the image set. This location can be a registry or a local directory. You must configure the `storageConfig` field unless you are using the Technology Preview OCI feature. <3> Specify the storage URL for the image stream when using a registry. For more information, see _Why use imagestreams_. <4> Specify the channel from which you want to retrieve the {product-title} images. @@ -33,4 +33,4 @@ mirror: <6> Specify the Operator catalog from which you want to retrieve the {product-title} images. <7> Specify the Operator packages to include in the image set. If this field is empty, all packages in the catalog are retrieved. <8> Specify the channels of the Operator packages to include in the image set. You must include the default channel for the Operator package even if you do not use the bundles in that channel. You can find the default channel by running the following command: `$ oc mirror list operators --catalog= --package=`. -<9> Specify any additional images to include in the image set. \ No newline at end of file +<9> Specify any additional images to include in the image set. diff --git a/snippets/lvms-scaling-up-storage-lvmcluster-cr-snippet.adoc b/snippets/lvms-scaling-up-storage-lvmcluster-cr-snippet.adoc index c46233c11d1f..8fc649f52767 100644 --- a/snippets/lvms-scaling-up-storage-lvmcluster-cr-snippet.adoc +++ b/snippets/lvms-scaling-up-storage-lvmcluster-cr-snippet.adoc @@ -22,7 +22,7 @@ spec: <1> Contains the configuration to specify the paths to the devices that you want to add to the LVM volume group. You can specify the device paths in the `paths` field, the `optionalPaths` field, or both. If you do not specify the device paths in both `paths` and `optionalPaths`, {lvms-first} adds the supported unused devices to the LVM volume group. {lvms} adds the devices to the LVM volume group only if the following conditions are met: * The device path exists. -* The device is supported by {lvms}. For information about unsupported devices, see "Devices not supported by {lvms}" in the "Additional resources" section. +* The device is supported by {lvms}. For information about unsupported devices, see "Devices not supported by {lvms}". <2> Specify the device paths. If the device path specified in this field does not exist, or the device is not supported by {lvms}, the `LVMCluster` CR moves to the `Failed` state. <3> Specify the optional device paths. If the device path specified in this field does not exist, or the device is not supported by {lvms}, {lvms} ignores the device without causing an error. + diff --git a/snippets/machine-config-node-disruption-actions.adoc b/snippets/machine-config-node-disruption-actions.adoc new file mode 100644 index 000000000000..d7fbecbe35e9 --- /dev/null +++ b/snippets/machine-config-node-disruption-actions.adoc @@ -0,0 +1,21 @@ +// Text snippet included in the following modules: +// +// * modules/machine-config-node-disruption.adoc +// * modules/machine-config-node-disruption-config.adoc + +When you make any of these changes, the node disruption policy determines which of the following actions are required when the MCO implements the changes: + +* *Reboot*: The MCO drains and reboots the nodes. This is the default behavior. +* *None*: The MCO does not drain or reboot the nodes. The MCO applies the changes with no further action. +* *Drain*: The MCO cordons and drains the nodes of their workloads. 
The workloads restart with the new configurations. +* *Reload*: For services, the MCO reloads the specified services without restarting the service. +* *Restart*: For services, the MCO fully restarts the specified services. +* *DaemonReload*: The MCO reloads the systemd manager configuration. +* *Special*: This is an internal MCO-only action and cannot be set by the user. + +[NOTE] +==== +* The `Reboot` and `None` actions cannot be used with any other actions, as the `Reboot` and `None` actions override the others. +* Actions are applied in the order that they are set in the node disruption policy list. +* If you make other machine config changes that do require a reboot or other disruption to the nodes, that reboot supersedes the node disruption policy actions. +==== diff --git a/snippets/microshift-healthy-pods-snip.adoc b/snippets/microshift-healthy-pods-snip.adoc new file mode 100644 index 000000000000..05d284597b45 --- /dev/null +++ b/snippets/microshift-healthy-pods-snip.adoc @@ -0,0 +1,31 @@ +// Snippet for healthy MicroShift output with oc get pods -A +// +//* microshift_troubleshooting/microshift-troubleshoot-cluster + +:_mod-docs-content-type: SNIPPET + +[source,terminal] +---- +$ oc get pods -A +---- +.Example output +[source,terminal] +---- +NAMESPACE NAME READY STATUS RESTARTS AGE +default i-06166fbb376f14a8bus-west-2computeinternal-debug-qtwcr 1/1 Running 0 46m +kube-system csi-snapshot-controller-5c6586d546-lprv4 1/1 Running 0 51m +kube-system csi-snapshot-webhook-6bf8ddc7f5-kz6k9 1/1 Running 0 51m +openshift-dns dns-default-45jl7 2/2 Running 0 50m +openshift-dns node-resolver-7wmzf 1/1 Running 0 51m +openshift-ingress router-default-78b86fbf9d-qvj9s 1/1 Running 0 51m +openshift-ovn-kubernetes ovnkube-master-5rfhh 4/4 Running 0 51m +openshift-ovn-kubernetes ovnkube-node-gcnt6 1/1 Running 0 51m +openshift-service-ca service-ca-bf5b7c9f8-pn6rk 1/1 Running 0 51m +openshift-storage topolvm-controller-549f7fbdd5-7vrmv 5/5 Running 0 51m +openshift-storage topolvm-node-rht2m 3/3 Running 0 50m +---- + +[NOTE] +==== +This example output shows basic {microshift-short}. If you have installed optional RPMs, the status of pods running those services is also expected to be shown in your output.
+==== \ No newline at end of file diff --git a/snippets/network-flow-matrix.csv b/snippets/network-flow-matrix.csv new file mode 100644 index 000000000000..24066b38c8c9 --- /dev/null +++ b/snippets/network-flow-matrix.csv @@ -0,0 +1,67 @@ +Direction,Protocol,Port,Namespace,Service,Pod,Container,Node Role,Optional +Ingress,TCP,22,Host system service,sshd,,,master,TRUE +Ingress,TCP,53,openshift-dns,dns-default,dnf-default,dns,master,FALSE +Ingress,TCP,111,Host system service,rpcbind,,,master,TRUE +Ingress,TCP,2379,openshift-etcd,etcd,etcd,etcdctl,master,FALSE +Ingress,TCP,2380,openshift-etcd,healthz,etcd,etcd,master,FALSE +Ingress,TCP,5050,openshift-machine-api,,ironic-proxy,ironic-proxy,master,FALSE +Ingress,TCP,6080,openshift-kube-apiserver,,kube-apiserver,kube-apiserver-insecure-readyz,master,FALSE +Ingress,TCP,6385,openshift-machine-api,,ironic-proxy,ironic-proxy,master,FALSE +Ingress,TCP,6443,openshift-kube-apiserver,apiserver,kube-apiserver,kube-apiserver,master,FALSE +Ingress,TCP,8080,openshift-network-operator ,,network-operator,network-operator,master,FALSE +Ingress,TCP,8798,openshift-machine-config-operator,machine-config-daemon,machine-config-daemon,machine-config-daemon,master,FALSE +Ingress,TCP,9001,openshift-machine-config-operator,machine-config-daemon,machine-config-daemon,kube-rbac-proxy,master,FALSE +Ingress,TCP,9099,openshift-cluster-version,cluster-version-operator,cluster-version-operator,cluster-version-operator,master,FALSE +Ingress,TCP,9100,openshift-monitoring,node-exporter,node-exporter,kube-rbac-proxy,master,FALSE +Ingress,TCP,9103,openshift-ovn-kubernetes,ovn-kubernetes-node,ovnkube-node,kube-rbac-proxy-node,master,FALSE +Ingress,TCP,9104,openshift-network-operator,metrics,network-operator,network-operator,master,FALSE +Ingress,TCP,9105,openshift-ovn-kubernetes,ovn-kubernetes-node,ovnkube-node,kube-rbac-proxy-ovn-metrics,master,FALSE +Ingress,TCP,9107,openshift-ovn-kubernetes,egressip-node-healthcheck,ovnkube-node,ovnkube-controller,master,FALSE +Ingress,TCP,9108,openshift-ovn-kubernetes,ovn-kubernetes-control-plane,ovnkube-control-plane,kube-rbac-proxy,master,FALSE +Ingress,TCP,9192,openshift-cluster-machine-approver,machine-approver,machine-approver,kube-rbac-proxy,master,FALSE +Ingress,TCP,9258,openshift-cloud-controller-manager-operator,machine-approver,cluster-cloud-controller-manager,cluster-cloud-controller-manager,master,FALSE +Ingress,TCP,9444,openshift-kni-infra,,haproxy,haproxy,master,FALSE +Ingress,TCP,9445,openshift-kni-infra,,haproxy,haproxy,master,FALSE +Ingress,TCP,9447,openshift-machine-api,,metal3-baremetal-operator,,master,FALSE +Ingress,TCP,9537,Host system service,crio-metrics,,,master,FALSE +Ingress,TCP,9637,openshift-machine-config-operator,kube-rbac-proxy-crio,kube-rbac-proxy-crio,kube-rbac-proxy-crio,master,FALSE +Ingress,TCP,9978,openshift-etcd,etcd,etcd,etcd-metrics,master,FALSE +Ingress,TCP,9979,openshift-etcd,etcd,etcd,etcd-metrics,master,FALSE +Ingress,TCP,9980,openshift-etcd,etcd,etcd,etcd,master,FALSE +Ingress,TCP,10250,Host system service,kubelet,,,master,FALSE +Ingress,TCP,10256,openshift-ovn-kubernetes,ovnkube,ovnkube,ovnkube-controller,master,FALSE +Ingress,TCP,10257,openshift-kube-controller-manager,kube-controller-manager,kube-controller-manager,kube-controller-manager,master,FALSE +Ingress,TCP,10258,openshift-cloud-controller-manager-operator,cloud-controller,cloud-controller-manager,cloud-controller-manager,master,FALSE +Ingress,TCP,10259,openshift-kube-scheduler,scheduler,openshift-kube-scheduler,kube-scheduler,master,FALSE 
+Ingress,TCP,10260,openshift-cloud-controller-manager-operator,cloud-controller,cloud-controller-manager,cloud-controller-manager,master,FALSE +Ingress,TCP,10300,openshift-cluster-csi-drivers,csi-livenessprobe,csi-driver-node,csi-driver,master,FALSE +Ingress,TCP,10309,openshift-cluster-csi-drivers,csi-node-driver,csi-driver-node,csi-node-driver-registrar,master,FALSE +Ingress,TCP,10357,openshift-kube-apiserver,openshift-kube-apiserver-healthz,kube-apiserver,kube-apiserver-check-endpoints,master,FALSE +Ingress,TCP,17697,openshift-kube-apiserver,openshift-kube-apiserver-healthz,kube-apiserver,kube-apiserver-check-endpoints,master,FALSE +Ingress,TCP,18080,openshift-kni-infra,,coredns,coredns,master,FALSE +Ingress,TCP,22623,openshift-machine-config-operator,machine-config-server,machine-config-server,machine-config-server,master,FALSE +Ingress,TCP,22624,openshift-machine-config-operator,machine-config-server,machine-config-server,machine-config-server,master,FALSE +Ingress,UDP,53,openshift-dns,dns-default,dnf-default,dns,master,FALSE +Ingress,UDP,111,Host system service,rpcbind,,,master,TRUE +Ingress,UDP,6081,openshift-ovn-kubernetes,ovn-kubernetes geneve,,,master,FALSE +Ingress,TCP,22,Host system service,sshd,,,worker,TRUE +Ingress,TCP,53,openshift-dns,dns-default,dnf-default,dns,worker,FALSE +Ingress,TCP,80,openshift-ingress,router-default,router-default,router,worker,FALSE +Ingress,TCP,111,Host system service,rpcbind,,,worker,TRUE +Ingress,TCP,443,openshift-ingress,router-default,router-default,router,worker,FALSE +Ingress,TCP,8798,openshift-machine-config-operator,machine-config-daemon,machine-config-daemon,machine-config-daemon,worker,FALSE +Ingress,TCP,9001,openshift-machine-config-operator,machine-config-daemon,machine-config-daemon,kube-rbac-proxy,worker,FALSE +Ingress,TCP,9100,openshift-monitoring,node-exporter,node-exporter,kube-rbac-proxy,worker,FALSE +Ingress,TCP,9103,openshift-ovn-kubernetes,ovn-kubernetes-node,ovnkube-node,kube-rbac-proxy-node,worker,FALSE +Ingress,TCP,9105,openshift-ovn-kubernetes,ovn-kubernetes-node,ovnkube-node,kube-rbac-proxy-ovn-metrics,worker,FALSE +Ingress,TCP,9107,openshift-ovn-kubernetes,egressip-node-healthcheck,ovnkube-node,ovnkube-controller,worker,FALSE +Ingress,TCP,9537,Host system service,crio-metrics,,,worker,FALSE +Ingress,TCP,9637,openshift-machine-config-operator,kube-rbac-proxy-crio,kube-rbac-proxy-crio,kube-rbac-proxy-crio,worker,FALSE +Ingress,TCP,10250,Host system service,kubelet,,,worker,FALSE +Ingress,TCP,10256,openshift-ovn-kubernetes,ovnkube,ovnkube,ovnkube-controller,worker,TRUE +Ingress,TCP,10300,openshift-cluster-csi-drivers,csi-livenessprobe,csi-driver-node,csi-driver,worker,FALSE +Ingress,TCP,10309,openshift-cluster-csi-drivers,csi-node-driver-registrar,csi-driver-node,csi-node-driver-registrar,worker,FALSE +Ingress,TCP,18080,openshift-kni-infra,,coredns,coredns,worker,FALSE +Ingress,UDP,53,openshift-dns,dns-default,dnf-default,dns,worker,FALSE +Ingress,UDP,111,Host system service,rpcbind,,,worker,TRUE +Ingress,UDP,6081,openshift-ovn-kubernetes,ovn-kubernetes geneve,,,worker,FALSE \ No newline at end of file diff --git a/snippets/olmv1-cli-only.adoc b/snippets/olmv1-cli-only.adoc index 2408aedfad71..f53cb06dc950 100644 --- a/snippets/olmv1-cli-only.adoc +++ b/snippets/olmv1-cli-only.adoc @@ -1,7 +1,7 @@ // Text snippet included in the following modules: // // * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc -// * operators/olm_v1/olmv1-managing-plain-bundles.adoc + :_mod-docs-content-type: SNIPPET diff --git 
a/snippets/olmv1-operator-api-group.adoc b/snippets/olmv1-operator-api-group.adoc deleted file mode 100644 index 61ddfb412011..000000000000 --- a/snippets/olmv1-operator-api-group.adoc +++ /dev/null @@ -1,17 +0,0 @@ -// Text snippet included in the following modules: -// -// * modules/olmv1-operator-api.adoc - -:_mod-docs-content-type: SNIPPET - -[NOTE] -==== -When using the OpenShift CLI (`oc`), the `Operator` resource provided with {olmv1} during this Technology Preview phase requires specifying the full `.` format: `operator.operators.operatorframework.io`. For example: - -[source,terminal] ----- -$ oc get operator.operators.operatorframework.io ----- - -If you specify only the `Operator` resource without the API group, the CLI returns results for an earlier API (`operator.operators.coreos.com`) that is unrelated to {olmv1}. -==== diff --git a/snippets/osdk-deprecation.adoc b/snippets/osdk-deprecation.adoc new file mode 100644 index 000000000000..32fa5a15f5ff --- /dev/null +++ b/snippets/osdk-deprecation.adoc @@ -0,0 +1,60 @@ +// Text snippet included in the following assemblies: +// * cli_reference/osdk/cli-osdk-install.adoc +// * cli_reference/osdk/cli-osdk-ref.adoc +// * operators/operator_sdk/ansible/osdk-ansible-cr-status.adoc +// * operators/operator_sdk/ansible/osdk-ansible-inside-operator.adoc +// * operators/operator_sdk/ansible/osdk-ansible-k8s-collection.adoc +// * operators/operator_sdk/ansible/osdk-ansible-project-layout.adoc +// * opearotors/operator_sdk/ansible/osdk-ansible-quickstart.adoc +// * operators/operator_sdk/ansible/osdk-ansible-support.adoc +// * operators/operator_sdk/ansible/osdk-ansible-tutorial.adoc +// * operators/ansible/osdk-ansible-updating-projects.adoc +// * operator/operator_sdk/golang/osdk-golang-project-layout.adoc +// * operators/operator_sdk/golang/osdk-golang-quickstart.adoc +// * operators/operator_sdk/golang/osdk-golang-tutorial.adoc +// * operators/operator_sdk/golang/osdk-golang-updating-projects.adoc +// * operators/operator_sdk/helm/osdk-helm-project-layout.adoc +// * operators/operator_sdk/helm/osdk-helm-quickstart.adoc +// * operators/operator_sdk/helm/osdk-helm-support.adoc +// * operators/operator_sdk/helm/osdk-helm-tutorial.adoc +// * operators/operator_sdk/helm/osdk-helm-updating-projects.adoc +// * operators/operator_sdk/helm/osdk-hybrid-helm-updating-projects.adoc +// * operators/operator_sdk/helm/osdk-hybrid-helm.adoc +// * operators/operator_sdk/java/osdk-java-project-layout.adoc +// * operators/operator_sdk/java/osdk-java-quickstart.adoc +// * operators/operator_sdk/java/osdk-java-tutorial.adoc +// * operators/operator_sdk/java/osdk-java-updating-projects.adoc +// * operators/operator_sdk/osdk-about.adoc +// * operators/operator_sdk/osdk-bundle-validate.adoc +// * operators/operator_sdk/osdk-cli-ref.adoc +// * operators/operator_sdk/osdk-complying-with-psa.adoc +// * operators/operator_sdk/osdk-generating-csvs.adoc +// * operators/operator_sdk/osdk-ha-sno.adoc +// * operators/operator_sdk/osdk-installing-cli.adoc +// * operators/operator_sdk/osdk-leader-election.adoc +// * operators/operator_sdk/osdk-migrating-to-v0-1-0.adoc +// * operators/operator_sdk/osdk-monitoring-prometheus.adoc +// * operators/operator_sdk/osdk-multi-arch-support.adoc +// * operators/operator_sdk/osdk-pkgman-to-bundle.adoc +// * operators/operator_sdk/osdk-pruning-utility.adoc +// * operators/operator_sdk/osdk-scorecard.adoc +// * operators/operator_sdk/osdk-working-bundle-images.adoc + +[IMPORTANT] +==== +[subs="attributes+"] +The Red{nbsp}Hat-supported 
version of the Operator SDK CLI tool, including the related scaffolding and testing tools for Operator projects, is deprecated and is planned to be removed in a future release of {product-title}. Red{nbsp}Hat will provide bug fixes and support for this feature during the current release lifecycle, but this feature will no longer receive enhancements and will be removed from future {product-title} releases. + +The Red{nbsp}Hat-supported version of the Operator SDK is not recommended for creating new Operator projects. Operator authors with existing Operator projects can use the version of the Operator SDK CLI tool released with {product-title} {product-version} to maintain their projects and create Operator releases targeting newer versions of {product-title}. + +The following related base images for Operator projects are _not_ deprecated. The runtime functionality and configuration APIs for these base images are still supported for bug fixes and for addressing CVEs. + +* The base image for Ansible-based Operator projects +* The base image for Helm-based Operator projects + +ifndef::openshift-rosa,openshift-dedicated[] +For the most recent list of major functionality that has been deprecated or removed within {product-title}, refer to the _Deprecated and removed features_ section of the {product-title} release notes. +endif::openshift-rosa,openshift-dedicated[] + +For information about the unsupported, community-maintained version of the Operator SDK, see link:https://sdk.operatorframework.io[Operator SDK (Operator Framework)]. +==== diff --git a/snippets/ossm-current-version-support-snippet.adoc b/snippets/ossm-current-version-support-snippet.adoc new file mode 100644 index 000000000000..0a600c68061f --- /dev/null +++ b/snippets/ossm-current-version-support-snippet.adoc @@ -0,0 +1,7 @@ +// Snippets included in the following assemblies and modules: +// +// * service_mesh/v2x/ossm-rn-new-features.adoc + +:_mod-docs-content-type: SNIPPET + +The most current version of the {SMProductName} Operator can be used with all supported versions of {SMProductShortName}. The version of {SMProductShortName} is specified by using the `ServiceMeshControlPlane` resource. diff --git a/snippets/pg-cnf-topology-aware-lifecycle-manager-operator-troubleshooting.adoc b/snippets/pg-cnf-topology-aware-lifecycle-manager-operator-troubleshooting.adoc new file mode 100644 index 000000000000..d05e85c49b4d --- /dev/null +++ b/snippets/pg-cnf-topology-aware-lifecycle-manager-operator-troubleshooting.adoc @@ -0,0 +1,33 @@ +[source,yaml] +---- +manifests: +- path: source-crs/DefaultCatsrc.yaml + patches: + - metadata: + name: redhat-operators + spec: + displayName: Red Hat Operators Catalog + image: registry.example.com:5000/olm/redhat-operators:v{product-version} + updateStrategy: + registryPoll: + interval: 1h + status: + connectionState: + lastObservedState: READY +- path: source-crs/DefaultCatsrc.yaml + patches: + - metadata: + name: redhat-operators-v2 <1> + spec: + displayName: Red Hat Operators Catalog v2 <2> + image: registry.example.com:5000/olm/redhat-operators: <3> + updateStrategy: + registryPoll: + interval: 1h + status: + connectionState: + lastObservedState: READY +---- +<1> Update the name for the new configuration. +<2> Update the display name for the new configuration. +<3> Update the index image URL. This `policies.manifests.patches.spec.image` field overrides any configuration in the `DefaultCatsrc.yaml` file.
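As a quick sanity check for the snippet above (not part of the upstream file), you can confirm that a rebuilt catalog source reaches the `READY` state before relying on it. This sketch assumes the catalog is created in the default `openshift-marketplace` namespace and reuses the example name `redhat-operators-v2`:

[source,terminal]
----
$ oc get catalogsource redhat-operators-v2 -n openshift-marketplace \
  -o jsonpath='{.status.connectionState.lastObservedState}'
----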
diff --git a/snippets/pg-cnf-topology-aware-lifecycle-manager-operator-update.adoc b/snippets/pg-cnf-topology-aware-lifecycle-manager-operator-update.adoc new file mode 100644 index 000000000000..5154ed1c01b7 --- /dev/null +++ b/snippets/pg-cnf-topology-aware-lifecycle-manager-operator-update.adoc @@ -0,0 +1,48 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml,subs="attributes+"] +---- +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: du-upgrade +placementBindingDefaults: + name: du-upgrade-placement-binding +policyDefaults: + namespace: ztp-group-du-sno + placement: + labelSelector: + matchExpressions: + - key: group-du-sno + operator: Exists + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: du-upgrade-operator-catsrc-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "1" + manifests: + - path: source-crs/DefaultCatsrc.yaml + patches: + - metadata: + name: redhat-operators + spec: + displayName: Red Hat Operators Catalog + image: registry.example.com:5000/olm/redhat-operators:v{product-version} <1> + updateStrategy: <2> + registryPoll: + interval: 1h + status: + connectionState: + lastObservedState: READY <3> +---- +<1> Contains the required Operator images. If the index images are always pushed to the same image name and tag, this change is not needed. +<2> The `registryPoll.interval` field sets how frequently Operator Lifecycle Manager (OLM) polls the index image for new Operator versions. This change is not needed if a new index image tag is always pushed for y-stream and z-stream Operator updates. You can set `registryPoll.interval` to a shorter interval to expedite the update; however, shorter intervals increase computational load. To counteract this, restore `registryPoll.interval` to the default value after the update is complete. +<3> Displays the observed state of the catalog connection. The `READY` value ensures that the `CatalogSource` policy is ready, indicating that the index pod is pulled and is running. This way, {cgu-operator} upgrades the Operators based on up-to-date policy compliance states.
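To illustrate callout <2>, a minimal patch that temporarily shortens the polling interval might look like the following sketch. The `15m` value is an arbitrary example, not a recommendation:

[source,yaml]
----
- path: source-crs/DefaultCatsrc.yaml
  patches:
    - metadata:
        name: redhat-operators
      spec:
        updateStrategy:
          registryPoll:
            interval: 15m # shorter than the 1h default shown above; restore after the update completes
----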
diff --git a/snippets/pg-cnf-topology-aware-lifecycle-manager-pao-update.yaml b/snippets/pg-cnf-topology-aware-lifecycle-manager-pao-update.yaml new file mode 100644 index 000000000000..df38008fdb39 --- /dev/null +++ b/snippets/pg-cnf-topology-aware-lifecycle-manager-pao-update.yaml @@ -0,0 +1,7 @@ +- name: group-du-sno-pg-subscriptions-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/PaoSubscriptionNS.yaml + - path: source-crs/PaoSubscriptionOperGroup.yaml + - path: source-crs/PaoSubscription.yaml \ No newline at end of file diff --git a/snippets/pg-cnf-topology-aware-lifecycle-manager-platform-update.adoc b/snippets/pg-cnf-topology-aware-lifecycle-manager-platform-update.adoc new file mode 100644 index 000000000000..cb07a97e7207 --- /dev/null +++ b/snippets/pg-cnf-topology-aware-lifecycle-manager-platform-update.adoc @@ -0,0 +1,66 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml,subs="attributes+"] +---- +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: du-upgrade +placementBindingDefaults: + name: du-upgrade-placement-binding +policyDefaults: + namespace: ztp-group-du-sno + placement: + labelSelector: + matchExpressions: + - key: group-du-sno + operator: Exists + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: du-upgrade-platform-upgrade + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "100" + manifests: + - path: source-crs/ClusterVersion.yaml <1> + patches: + - metadata: + name: version + spec: + channel: stable-{product-version} + desiredUpdate: + version: {product-version}.4 + upstream: http://upgrade.example.com/images/upgrade-graph_stable-{product-version} + status: + history: + - state: Completed + version: {product-version}.4 + - name: du-upgrade-platform-upgrade-prep + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "1" + manifests: + - path: source-crs/ImageSignature.yaml <2> + - path: source-crs/DisconnectedICSP.yaml + patches: + - metadata: + name: disconnected-internal-icsp-for-ocp + spec: + repositoryDigestMirrors: <3> + - mirrors: + - quay-intern.example.com/ocp4/openshift-release-dev + source: quay.io/openshift-release-dev/ocp-release + - mirrors: + - quay-intern.example.com/ocp4/openshift-release-dev + source: quay.io/openshift-release-dev/ocp-v4.0-art-dev +---- +<1> Shows the `ClusterVersion` CR to trigger the update. The `channel`, `upstream`, and `desiredUpdate` fields are all required for image pre-caching. +<2> `ImageSignature.yaml` contains the image signature of the required release image. The image signature is used to verify the image before applying the platform update. +<3> Shows the mirror repository that contains the required {product-title} image. Get the mirrors from the `imageContentSources.yaml` file that you saved when following the procedures in the "Setting up the environment" section.
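After the `du-upgrade-platform-upgrade` policy is enforced, a quick way to confirm that the update completed on a spoke cluster is to read back the most recent `ClusterVersion` history entry. This is a sketch using standard `oc` JSONPath output:

[source,terminal]
----
$ oc get clusterversion version \
  -o jsonpath='{.status.history[0].version} {.status.history[0].state}{"\n"}'
----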
+ diff --git a/snippets/pg-group-du-sno-config-policy.yaml b/snippets/pg-group-du-sno-config-policy.yaml new file mode 100644 index 000000000000..db03631e73ab --- /dev/null +++ b/snippets/pg-group-du-sno-config-policy.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: du-upgrade +placementBindingDefaults: + name: du-upgrade-placement-binding +policyDefaults: + namespace: ztp-group-du-sno + placement: + labelSelector: + matchExpressions: + - key: group-du-sno + operator: Exists + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: du-upgrade-operator-catsrc-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "1" + manifests: + - path: source-crs/DefaultCatsrc.yaml + patches: + - metadata: + name: redhat-operators + spec: + displayName: Red Hat Operators Catalog + image: registry.example.com:5000/olm/redhat-operators:v4.14 + updateStrategy: + registryPoll: + interval: 1h + status: + connectionState: + lastObservedState: READY diff --git a/snippets/pg-sriov-fec-cnf-topology-aware-lifecycle-manager-operator-update.adoc b/snippets/pg-sriov-fec-cnf-topology-aware-lifecycle-manager-operator-update.adoc new file mode 100644 index 000000000000..bb09d4e08933 --- /dev/null +++ b/snippets/pg-sriov-fec-cnf-topology-aware-lifecycle-manager-operator-update.adoc @@ -0,0 +1,51 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: du-upgrade +placementBindingDefaults: + name: du-upgrade-placement-binding +policyDefaults: + namespace: ztp-group-du-sno + placement: + labelSelector: + matchExpressions: + - key: group-du-sno + operator: Exists + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: du-upgrade-fec-catsrc-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "1" + manifests: + - path: source-crs/DefaultCatsrc.yaml + patches: + - metadata: + name: certified-operators + spec: + displayName: Intel SRIOV-FEC Operator + image: registry.example.com:5000/olm/far-edge-sriov-fec:v4.10 + updateStrategy: + registryPoll: + interval: 10m + - name: du-upgrade-subscriptions-fec-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/AcceleratorsSubscription.yaml + patches: + - spec: + channel: stable + source: certified-operators +---- diff --git a/snippets/pg-using-ztp-to-update-source-crs.yaml b/snippets/pg-using-ztp-to-update-source-crs.yaml new file mode 100644 index 000000000000..cc23f89dfc41 --- /dev/null +++ b/snippets/pg-using-ztp-to-update-source-crs.yaml @@ -0,0 +1,13 @@ +- path: source-crs/PerformanceProfile.yaml + patches: + - spec: + # These must be tailored for the specific hardware platform + cpu: + isolated: "2-19,22-39" + reserved: "0-1,20-21" + hugepages: + defaultHugepagesSize: 1G + pages: + - size: 1G + count: 10 + globallyDisableIrqLoadBalancing: false diff --git a/snippets/pg-ztp-adding-new-content-to-gitops-ztp-folder-structure.adoc b/snippets/pg-ztp-adding-new-content-to-gitops-ztp-folder-structure.adoc new file mode 100644 index 000000000000..da3df80a080b --- /dev/null +++ b/snippets/pg-ztp-adding-new-content-to-gitops-ztp-folder-structure.adoc @@ -0,0 +1,20 @@ +:_mod-docs-content-type: SNIPPET 
+[source,terminal] +---- +example +└── acmpolicygenerator + ├── dev.yaml + ├── kustomization.yaml + ├── mec-edge-sno1.yaml + ├── sno.yaml + └── source-crs <1> + ├── PaoCatalogSource.yaml + ├── PaoSubscription.yaml + ├── custom-crs + | ├── apiserver-config.yaml + | └── disable-nic-lldp.yaml + └── elasticsearch + ├── ElasticsearchNS.yaml + └── ElasticsearchOperatorGroup.yaml +---- +<1> The `source-crs` subdirectory must be in the same directory as the `kustomization.yaml` file. diff --git a/snippets/pg-ztp-adding-new-content-to-gitops-ztp.adoc b/snippets/pg-ztp-adding-new-content-to-gitops-ztp.adoc new file mode 100644 index 000000000000..7dd94da67b88 --- /dev/null +++ b/snippets/pg-ztp-adding-new-content-to-gitops-ztp.adoc @@ -0,0 +1,99 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: group-dev +placementBindingDefaults: + name: group-dev-placement-binding +policyDefaults: + namespace: ztp-clusters + placement: + labelSelector: + matchExpressions: + - key: dev + operator: In + values: + - "true" + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: group-dev-group-dev-cluster-log-ns + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/ClusterLogNS.yaml + - name: group-dev-group-dev-cluster-log-operator-group + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/ClusterLogOperGroup.yaml + - name: group-dev-group-dev-cluster-log-sub + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/ClusterLogSubscription.yaml + - name: group-dev-group-dev-lso-ns + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/StorageNS.yaml + - name: group-dev-group-dev-lso-operator-group + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/StorageOperGroup.yaml + - name: group-dev-group-dev-lso-sub + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/StorageSubscription.yaml + - name: group-dev-group-dev-pao-cat-source + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "1" + manifests: + - path: source-crs/PaoSubscriptionCatalogSource.yaml + patches: + - spec: + image: + - name: group-dev-group-dev-pao-ns + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/PaoSubscriptionNS.yaml + - name: group-dev-group-dev-pao-sub + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/PaoSubscription.yaml + - name: group-dev-group-dev-elasticsearch-ns + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: elasticsearch/ElasticsearchNS.yaml <1> + - name: group-dev-group-dev-elasticsearch-operator-group + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: elasticsearch/ElasticsearchOperatorGroup.yaml + - name: group-dev-group-dev-apiserver-config + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: custom-crs/apiserver-config.yaml <1> + - name: group-dev-group-dev-disable-nic-lldp + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: custom-crs/disable-nic-lldp.yaml +---- +<1> Set `policies.manifests.path` to include the relative path to the file from 
the `/source-crs` parent directory. diff --git a/snippets/pg-ztp-configuring-hwevents-using-pgt-hardware-event.adoc b/snippets/pg-ztp-configuring-hwevents-using-pgt-hardware-event.adoc new file mode 100644 index 000000000000..3357dd558705 --- /dev/null +++ b/snippets/pg-ztp-configuring-hwevents-using-pgt-hardware-event.adoc @@ -0,0 +1,11 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- path: source-crs/HardwareEvent.yaml <1> + patches: + - spec: + logLevel: debug + nodeSelector: {} + transportHost: http://hw-event-publisher-service.openshift-bare-metal-events.svc.cluster.local:9043 +---- +<1> Each baseboard management controller (BMC) requires a single `HardwareEvent` CR only. diff --git a/snippets/pg-ztp-configuring-hwevents.yaml b/snippets/pg-ztp-configuring-hwevents.yaml new file mode 100644 index 000000000000..531daddd6015 --- /dev/null +++ b/snippets/pg-ztp-configuring-hwevents.yaml @@ -0,0 +1,4 @@ +# Bare Metal Event Relay Operator +- path: source-crs/BareMetalEventRelaySubscriptionNS.yaml +- path: source-crs/BareMetalEventRelaySubscriptionOperGroup.yaml +- path: source-crs/BareMetalEventRelaySubscription.yaml \ No newline at end of file diff --git a/snippets/pg-ztp-configuring-ptp-fast-events-amqp-transport.yaml b/snippets/pg-ztp-configuring-ptp-fast-events-amqp-transport.yaml new file mode 100644 index 000000000000..ff0ecff6664c --- /dev/null +++ b/snippets/pg-ztp-configuring-ptp-fast-events-amqp-transport.yaml @@ -0,0 +1,13 @@ +- path: source-crs/PtpOperatorConfigForEvent.yaml + patches: + - metadata: + name: default + namespace: openshift-ptp + annotations: + ran.openshift.io/ztp-deploy-wave: "10" + spec: + daemonNodeSelector: + node-role.kubernetes.io/$mcp: "" + ptpEventConfig: + enableEventPublisher: true + transportHost: "amqp://amq-router.amq-router.svc.cluster.local" diff --git a/snippets/pg-ztp-configuring-ptp-fast-events-amqp.yaml b/snippets/pg-ztp-configuring-ptp-fast-events-amqp.yaml new file mode 100644 index 000000000000..7e2bbd00c113 --- /dev/null +++ b/snippets/pg-ztp-configuring-ptp-fast-events-amqp.yaml @@ -0,0 +1,4 @@ +#AMQ Interconnect Operator for fast events +- path: source-crs/AmqSubscriptionNS.yaml +- path: source-crs/AmqSubscriptionOperGroup.yaml +- path: source-crs/AmqSubscription.yaml \ No newline at end of file diff --git a/snippets/pg-ztp-configuring-ptp-fast-events-linuxptp.adoc b/snippets/pg-ztp-configuring-ptp-fast-events-linuxptp.adoc new file mode 100644 index 000000000000..a93404d2fca3 --- /dev/null +++ b/snippets/pg-ztp-configuring-ptp-fast-events-linuxptp.adoc @@ -0,0 +1,135 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- path: source-crs/PtpConfigSlave.yaml <1> + patches: + - metadata: + name: "du-ptp-slave" + spec: + recommend: + - match: + - nodeLabel: node-role.kubernetes.io/master + priority: 4 + profile: slave + profile: + - name: "slave" + # This interface must match the hardware in this group + interface: "ens5f0" <2> + ptp4lOpts: "-2 -s --summary_interval -4" <3> + phc2sysOpts: "-a -r -n 24" <4> + ptpSchedulingPolicy: SCHED_FIFO + ptpSchedulingPriority: 10 + ptpSettings: + logReduce: "true" + ptp4lConf: | + [global] + # + # Default Data Set + # + twoStepFlag 1 + slaveOnly 1 + priority1 128 + priority2 128 + domainNumber 24 + #utc_offset 37 + clockClass 255 + clockAccuracy 0xFE + offsetScaledLogVariance 0xFFFF + free_running 0 + freq_est_interval 1 + dscp_event 0 + dscp_general 0 + dataset_comparison G.8275.x + G.8275.defaultDS.localPriority 128 + # + # Port Data Set + # + logAnnounceInterval -3 + logSyncInterval 
-4 + logMinDelayReqInterval -4 + logMinPdelayReqInterval -4 + announceReceiptTimeout 3 + syncReceiptTimeout 0 + delayAsymmetry 0 + fault_reset_interval -4 + neighborPropDelayThresh 20000000 + masterOnly 0 + G.8275.portDS.localPriority 128 + # + # Run time options + # + assume_two_step 0 + logging_level 6 + path_trace_enabled 0 + follow_up_info 0 + hybrid_e2e 0 + inhibit_multicast_service 0 + net_sync_monitor 0 + tc_spanning_tree 0 + tx_timestamp_timeout 50 + unicast_listen 0 + unicast_master_table 0 + unicast_req_duration 3600 + use_syslog 1 + verbose 0 + summary_interval 0 + kernel_leap 1 + check_fup_sync 0 + clock_class_threshold 7 + # + # Servo Options + # + pi_proportional_const 0.0 + pi_integral_const 0.0 + pi_proportional_scale 0.0 + pi_proportional_exponent -0.3 + pi_proportional_norm_max 0.7 + pi_integral_scale 0.0 + pi_integral_exponent 0.4 + pi_integral_norm_max 0.3 + step_threshold 2.0 + first_step_threshold 0.00002 + max_frequency 900000000 + clock_servo pi + sanity_freq_limit 200000000 + ntpshm_segment 0 + # + # Transport options + # + transportSpecific 0x0 + ptp_dst_mac 01:1B:19:00:00:00 + p2p_dst_mac 01:80:C2:00:00:0E + udp_ttl 1 + udp6_scope 0x0E + uds_address /var/run/ptp4l + # + # Default interface options + # + clock_type OC + network_transport L2 + delay_mechanism E2E + time_stamping hardware + tsproc_mode filter + delay_filter moving_median + delay_filter_length 10 + egressLatency 0 + ingressLatency 0 + boundary_clock_jbod 0 + # + # Clock description + # + productDescription ;; + revisionData ;; + manufacturerIdentity 00:00:00 + userDescription ; + timeSource 0xA0 + ptpClockThreshold: <5> + holdOverTimeout: 30 # seconds + maxOffsetThreshold: 100 # nano seconds + minOffsetThreshold: -100 +---- +<1> Can be one of `PtpConfigMaster.yaml`, `PtpConfigSlave.yaml`, or `PtpConfigSlaveCvl.yaml` depending on your requirements. `PtpConfigSlaveCvl.yaml` configures `linuxptp` services for an Intel E810 Columbiaville NIC. For configurations based on `{policy-prefix}group-du-sno-ranGen.yaml` or `{policy-prefix}group-du-3node-ranGen.yaml`, use `PtpConfigSlave.yaml`. +<2> Device-specific interface name. +<3> You must append the `--summary_interval -4` value to `ptp4lOpts` in `.spec.sourceFiles.spec.profile` to enable PTP fast events. +<4> Required `phc2sysOpts` values. The `-m` option prints messages to `stdout`. The `linuxptp-daemon` `DaemonSet` parses the logs and generates Prometheus metrics. +<5> Optional. If the `ptpClockThreshold` stanza is not present, default values are used for the `ptpClockThreshold` fields. The stanza shows default `ptpClockThreshold` values. The `ptpClockThreshold` values control when PTP events are triggered after the PTP master clock is disconnected. `holdOverTimeout` is the time value in seconds before the PTP clock event state changes to `FREERUN` when the PTP master clock is disconnected. The `maxOffsetThreshold` and `minOffsetThreshold` settings configure offset values in nanoseconds that compare against the values for `CLOCK_REALTIME` (`phc2sys`) or master offset (`ptp4l`). When the `ptp4l` or `phc2sys` offset value is outside this range, the PTP clock state is set to `FREERUN`. When the offset value is within this range, the PTP clock state is set to `LOCKED`.
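Since callout <4> notes that the `linuxptp-daemon` `DaemonSet` parses logs to generate metrics, a quick way to verify that the daemon is running and producing output is sketched below. The label and container name here are the defaults used by the PTP Operator at the time of writing and should be treated as assumptions:

[source,terminal]
----
$ oc get daemonset linuxptp-daemon -n openshift-ptp
$ oc logs -n openshift-ptp -l app=linuxptp-daemon \
  -c linuxptp-daemon-container --tail=20
----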
diff --git a/snippets/pg-ztp-configuring-ptp-fast-events.yaml b/snippets/pg-ztp-configuring-ptp-fast-events.yaml new file mode 100644 index 000000000000..6bfe38d92e6d --- /dev/null +++ b/snippets/pg-ztp-configuring-ptp-fast-events.yaml @@ -0,0 +1,13 @@ +- path: source-crs/PtpOperatorConfigForEvent.yaml + patches: + - metadata: + name: default + namespace: openshift-ptp + annotations: + ran.openshift.io/ztp-deploy-wave: "10" + spec: + daemonNodeSelector: + node-role.kubernetes.io/$mcp: "" + ptpEventConfig: + enableEventPublisher: true + transportHost: "http://ptp-event-publisher-service-NODE_NAME.openshift-ptp.svc.cluster.local:9043" diff --git a/snippets/pg-ztp-creating-hwevents-amqp.yaml b/snippets/pg-ztp-creating-hwevents-amqp.yaml new file mode 100644 index 000000000000..91fad4336b8b --- /dev/null +++ b/snippets/pg-ztp-creating-hwevents-amqp.yaml @@ -0,0 +1,8 @@ +# AMQ Interconnect Operator for fast events +- path: source-crs/AmqSubscriptionNS.yaml +- path: source-crs/AmqSubscriptionOperGroup.yaml +- path: source-crs/AmqSubscription.yaml +# Bare Metal Event Relay Operator +- path: source-crs/BareMetalEventRelaySubscriptionNS.yaml +- path: source-crs/BareMetalEventRelaySubscriptionOperGroup.yaml +- path: source-crs/BareMetalEventRelaySubscription.yaml \ No newline at end of file diff --git a/snippets/pg-ztp-example-single-node-cluster-validator.adoc b/snippets/pg-ztp-example-single-node-cluster-validator.adoc new file mode 100644 index 000000000000..efd43e7f010f --- /dev/null +++ b/snippets/pg-ztp-example-single-node-cluster-validator.adoc @@ -0,0 +1,41 @@ +.Example single-node cluster validator inform policy CR (acm-group-du-sno-validator-ranGen.yaml) +[source,yaml] +---- +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: group-du-sno-validator-latest +placementBindingDefaults: + name: group-du-sno-validator-latest-placement-binding +policyDefaults: + namespace: ztp-group + placement: + labelSelector: + matchExpressions: + - key: du-profile + operator: In + values: + - latest + - key: group-du-sno + operator: Exists + - key: ztp-done + operator: DoesNotExist + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: group-du-sno-validator-latest-du-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "10000" + evaluationInterval: + compliant: 5s + manifests: + - path: source-crs/validatorCRs/informDuValidator-MCP-master.yaml +---- diff --git a/snippets/pg-ztp-provisioning-lvm-storage-cluster.adoc b/snippets/pg-ztp-provisioning-lvm-storage-cluster.adoc new file mode 100644 index 000000000000..47c4bb1ea633 --- /dev/null +++ b/snippets/pg-ztp-provisioning-lvm-storage-cluster.adoc @@ -0,0 +1,16 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- fileName: StorageLVMCluster.yaml + policyName: "lvms-config" + metadata: + name: "lvms-storage-cluster-config" + spec: + storage: + deviceClasses: + - name: vg1 + thinPoolConfig: + name: thin-pool-1 + sizePercent: 90 + overprovisionRatio: 10 +---- diff --git a/snippets/pg-ztp-provisioning-lvm-storage-sub.yaml b/snippets/pg-ztp-provisioning-lvm-storage-sub.yaml new file mode 100644 index 000000000000..359d1d2497c6 --- /dev/null +++ b/snippets/pg-ztp-provisioning-lvm-storage-sub.yaml @@ -0,0 +1,3 @@ +- path: source-crs/StorageLVMSubscriptionNS.yaml +- path: source-crs/StorageLVMSubscriptionOperGroup.yaml +- path: source-crs/StorageLVMSubscription.yaml diff --git 
a/snippets/pg-ztp-provisioning-lvm-storage.adoc b/snippets/pg-ztp-provisioning-lvm-storage.adoc new file mode 100644 index 000000000000..62c3d6f59057 --- /dev/null +++ b/snippets/pg-ztp-provisioning-lvm-storage.adoc @@ -0,0 +1,14 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml,subs="attributes+"] +---- +- name: subscription-policies + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: + - path: source-crs/StorageLVMOSubscriptionNS.yaml + - path: source-crs/StorageLVMOSubscriptionOperGroup.yaml + - path: source-crs/StorageLVMOSubscription.yaml + spec: + name: lvms-operator + channel: stable-{product-version} +---- diff --git a/snippets/pg-ztp-specifying-nics-in-pgt-hub-cluster-templates.yaml b/snippets/pg-ztp-specifying-nics-in-pgt-hub-cluster-templates.yaml new file mode 100644 index 000000000000..78055287b9e6 --- /dev/null +++ b/snippets/pg-ztp-specifying-nics-in-pgt-hub-cluster-templates.yaml @@ -0,0 +1,101 @@ +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: group-du-sno-pgt +placementBindingDefaults: + name: group-du-sno-pgt-placement-binding +policyDefaults: + placement: + labelSelector: + matchExpressions: + - key: group-du-sno-zone + operator: In + values: + - zone-1 + - key: hardware-type + operator: In + values: + - hardware-type-1 + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: group-du-sno-pgt-group-du-sno-cfg-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "10" + manifests: + - path: source-crs/ClusterLogForwarder.yaml + patches: + - spec: + outputs: '{{hub fromConfigMap "" "group-zones-configmap" (printf "%s-cluster-log-fwd-outputs" (index .ManagedClusterLabels "group-du-sno-zone")) | toLiteral hub}}' + pipelines: '{{hub fromConfigMap "" "group-zones-configmap" (printf "%s-cluster-log-fwd-pipelines" (index .ManagedClusterLabels "group-du-sno-zone")) | toLiteral hub}}' + - path: source-crs/PerformanceProfile-MCP-master.yaml + patches: + - metadata: + name: openshift-node-performance-profile + spec: + additionalKernelArgs: + - rcupdate.rcu_normal_after_boot=0 + - vfio_pci.enable_sriov=1 + - vfio_pci.disable_idle_d3=1 + - efi=runtime + cpu: + isolated: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-cpu-isolated" (index .ManagedClusterLabels "hardware-type")) hub}}' + reserved: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-cpu-reserved" (index .ManagedClusterLabels "hardware-type")) hub}}' + hugepages: + defaultHugepagesSize: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-hugepages-default" (index .ManagedClusterLabels "hardware-type")) hub}}' + pages: + - count: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-hugepages-count" (index .ManagedClusterLabels "hardware-type")) | toInt hub}}' + size: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-hugepages-size" (index .ManagedClusterLabels "hardware-type")) hub}}' + realTimeKernel: + enabled: true + - name: group-du-sno-pgt-group-du-sno-sriov-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "100" + manifests: + - path: source-crs/SriovNetwork.yaml + patches: + - metadata: + name: sriov-nw-du-fh + spec: + resourceName: du_fh + vlan: '{{hub fromConfigMap "" "site-data-configmap" (printf "%s-sriov-network-vlan-1" .ManagedClusterName) | toInt hub}}' + - path: 
source-crs/SriovNetworkNodePolicy-MCP-master.yaml + patches: + - metadata: + name: sriov-nnp-du-fh + spec: + deviceType: netdevice + isRdma: false + nicSelector: + pfNames: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-sriov-node-policy-pfNames-1" (index .ManagedClusterLabels "hardware-type")) | toLiteral hub}}' + numVfs: 8 + priority: 10 + resourceName: du_fh + - path: source-crs/SriovNetwork.yaml + patches: + - metadata: + name: sriov-nw-du-mh + spec: + resourceName: du_mh + vlan: '{{hub fromConfigMap "" "site-data-configmap" (printf "%s-sriov-network-vlan-2" .ManagedClusterName) | toInt hub}}' + - path: source-crs/SriovNetworkNodePolicy-MCP-master.yaml + patches: + - metadata: + name: sriov-nnp-du-mh + spec: + deviceType: netdevice + isRdma: false + nicSelector: + pfNames: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-sriov-node-policy-pfNames-2" (index .ManagedClusterLabels "hardware-type")) | toLiteral hub}}' + numVfs: 8 + priority: 10 + resourceName: du_mh diff --git a/snippets/pg-ztp-the-policygenerator.adoc b/snippets/pg-ztp-the-policygenerator.adoc new file mode 100644 index 000000000000..120bcf3e0789 --- /dev/null +++ b/snippets/pg-ztp-the-policygenerator.adoc @@ -0,0 +1,76 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: common-latest +placementBindingDefaults: + name: common-latest-placement-binding <1> +policyDefaults: + namespace: ztp-common + placement: + labelSelector: + matchExpressions: + - key: common + operator: In + values: + - "true" + - key: du-profile + operator: In + values: + - latest + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: common-latest-config-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "1" + manifests: + - path: source-crs/ReduceMonitoringFootprint.yaml + - path: source-crs/DefaultCatsrc.yaml <2> + patches: + - metadata: + name: redhat-operators-disconnected + spec: + displayName: disconnected-redhat-operators + image: registry.example.com:5000/disconnected-redhat-operators/disconnected-redhat-operator-index:v4.9 + - path: source-crs/DisconnectedICSP.yaml + patches: + - spec: + repositoryDigestMirrors: + - mirrors: + - registry.example.com:5000 + source: registry.redhat.io + - name: common-latest-subscriptions-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "2" + manifests: <3> + - path: source-crs/SriovSubscriptionNS.yaml + - path: source-crs/SriovSubscriptionOperGroup.yaml + - path: source-crs/SriovSubscription.yaml + - path: source-crs/SriovOperatorStatus.yaml + - path: source-crs/PtpSubscriptionNS.yaml + - path: source-crs/PtpSubscriptionOperGroup.yaml + - path: source-crs/PtpSubscription.yaml + - path: source-crs/PtpOperatorStatus.yaml + - path: source-crs/ClusterLogNS.yaml + - path: source-crs/ClusterLogOperGroup.yaml + - path: source-crs/ClusterLogSubscription.yaml + - path: source-crs/ClusterLogOperatorStatus.yaml + - path: source-crs/StorageNS.yaml + - path: source-crs/StorageOperGroup.yaml + - path: source-crs/StorageSubscription.yaml + - path: source-crs/StorageOperatorStatus.yaml +---- +<1> Applies the policies to all clusters with this label. +<2> The `DefaultCatsrc.yaml` file contains the catalog source for the disconnected registry and related registry configuration details.
+<3> Files listed under `policies.manifests` create the Operator policies for installed clusters. diff --git a/snippets/pg-ztp-using-pg-to-configure-high-performance-mode.yaml b/snippets/pg-ztp-using-pg-to-configure-high-performance-mode.yaml new file mode 100644 index 000000000000..25c48381f62a --- /dev/null +++ b/snippets/pg-ztp-using-pg-to-configure-high-performance-mode.yaml @@ -0,0 +1,7 @@ +- path: source-crs/PerformanceProfile.yaml + patches: + - spec: + workloadHints: + realTime: true + highPowerConsumption: true + perPodPowerManagement: false \ No newline at end of file diff --git a/snippets/pg-ztp-using-pg-to-configure-performance-mode.yaml b/snippets/pg-ztp-using-pg-to-configure-performance-mode.yaml new file mode 100644 index 000000000000..e50ebfac520a --- /dev/null +++ b/snippets/pg-ztp-using-pg-to-configure-performance-mode.yaml @@ -0,0 +1,7 @@ +- path: source-crs/PerformanceProfile.yaml + patches: + - spec: + workloadHints: + realTime: true + highPowerConsumption: false + perPodPowerManagement: false \ No newline at end of file diff --git a/snippets/pg-ztp-using-pg-to-configure-power-saving-mode.adoc b/snippets/pg-ztp-using-pg-to-configure-power-saving-mode.adoc new file mode 100644 index 000000000000..73f527fec1ce --- /dev/null +++ b/snippets/pg-ztp-using-pg-to-configure-power-saving-mode.adoc @@ -0,0 +1,17 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- path: source-crs/PerformanceProfile.yaml + patches: + - spec: + # ... + workloadHints: + realTime: true + highPowerConsumption: false + perPodPowerManagement: true + # ... + additionalKernelArgs: + - # ... + - "cpufreq.default_governor=schedutil" <1> +---- +<1> The `schedutil` governor is recommended; however, you can also use other governors, including `ondemand` and `powersave`. diff --git a/snippets/pg-ztp-using-pg-to-maximize-power-saving-mode.adoc b/snippets/pg-ztp-using-pg-to-maximize-power-saving-mode.adoc new file mode 100644 index 000000000000..c65ef14c9205 --- /dev/null +++ b/snippets/pg-ztp-using-pg-to-maximize-power-saving-mode.adoc @@ -0,0 +1,14 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- path: source-crs/TunedPerformancePatch.yaml + patches: + - spec: + profile: + - name: performance-patch + data: | + # ... + [sysfs] + /sys/devices/system/cpu/intel_pstate/max_perf_pct= <1> +---- +<1> The `max_perf_pct` option controls the maximum frequency the `cpufreq` driver is allowed to set as a percentage of the maximum supported CPU frequency. This value applies to all CPUs. You can check the maximum supported frequency in `/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq`. As a starting point, you can use a percentage that caps all CPUs at the `All Cores Turbo` frequency. The `All Cores Turbo` frequency is the frequency that all cores run at when the cores are all fully occupied.
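To make the `max_perf_pct` sizing concrete, here is a short worked example with hypothetical frequencies (the values are placeholders, not measurements from any specific part):

[source,terminal]
----
$ cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq
3900000
# Suppose the All Cores Turbo frequency of the part is 2700000 kHz.
# max_perf_pct = 2700000 / 3900000 * 100 ≈ 69
----

With these assumed numbers, setting `max_perf_pct=69` would cap all CPUs at approximately the All Cores Turbo frequency.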
diff --git a/snippets/pg-ztp-worker-node-preparing-policies.adoc b/snippets/pg-ztp-worker-node-preparing-policies.adoc new file mode 100644 index 000000000000..299da20a7c19 --- /dev/null +++ b/snippets/pg-ztp-worker-node-preparing-policies.adoc @@ -0,0 +1,100 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: example-sno-workers +placementBindingDefaults: + name: example-sno-workers-placement-binding +policyDefaults: + namespace: example-sno + placement: + labelSelector: + matchExpressions: + - key: sites + operator: In + values: + - example-sno <1> + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: example-sno-workers-config-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: "10" + manifests: + - path: source-crs/MachineConfigGeneric.yaml <2> + patches: + - metadata: + labels: + machineconfiguration.openshift.io/role: worker <3> + name: enable-workload-partitioning + spec: + config: + storage: + files: + - contents: + source: data:text/plain;charset=utf-8;base64,W2NyaW8ucnVudGltZS53b3JrbG9hZHMubWFuYWdlbWVudF0KYWN0aXZhdGlvbl9hbm5vdGF0aW9uID0gInRhcmdldC53b3JrbG9hZC5vcGVuc2hpZnQuaW8vbWFuYWdlbWVudCIKYW5ub3RhdGlvbl9wcmVmaXggPSAicmVzb3VyY2VzLndvcmtsb2FkLm9wZW5zaGlmdC5pbyIKcmVzb3VyY2VzID0geyAiY3B1c2hhcmVzIiA9IDAsICJjcHVzZXQiID0gIjAtMyIgfQo= + mode: 420 + overwrite: true + path: /etc/crio/crio.conf.d/01-workload-partitioning + user: + name: root + - contents: + source: data:text/plain;charset=utf-8;base64,ewogICJtYW5hZ2VtZW50IjogewogICAgImNwdXNldCI6ICIwLTMiCiAgfQp9Cg== + mode: 420 + overwrite: true + path: /etc/kubernetes/openshift-workload-pinning + user: + name: root + - path: source-crs/PerformanceProfile-MCP-worker.yaml + patches: + - metadata: + name: openshift-worker-node-performance-profile + spec: + cpu: <4> + isolated: 4-47 + reserved: 0-3 + hugepages: + defaultHugepagesSize: 1G + pages: + - count: 32 + size: 1G + realTimeKernel: + enabled: true + - path: source-crs/TunedPerformancePatch-MCP-worker.yaml + patches: + - metadata: + name: performance-patch-worker + spec: + profile: + - data: | + [main] + summary=Configuration changes profile inherited from performance created tuned + include=openshift-node-performance-openshift-worker-node-performance-profile + [bootloader] + cmdline_crash=nohz_full=4-47 <5> + [sysctl] + kernel.timer_migration=1 + [scheduler] + group.ice-ptp=0:f:10:*:ice-ptp.* + [service] + service.stalld=start,enable + service.chronyd=stop,disable + name: performance-patch-worker + recommend: + - profile: performance-patch-worker +---- +<1> The policies are applied to all clusters with this label. +<2> This generic `MachineConfig` CR is used to configure workload partitioning on the worker node. +<3> The `machineconfiguration.openshift.io/role` label must be set to `worker`. +<4> The `cpu.isolated` and `cpu.reserved` fields must be configured for each particular hardware platform. +<5> The `cmdline_crash` CPU set must match the `cpu.isolated` set in the `PerformanceProfile` section.
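For readability, the two base64-encoded `storage.files` payloads in the example decode to the following CRI-O drop-in and workload pinning file. They are decoded here only as a convenience; the encoded data above remains the source of truth. Note that the `0-3` CPU set matches the `cpu.reserved` field in the `PerformanceProfile` patch:

[source,text]
----
# /etc/crio/crio.conf.d/01-workload-partitioning
[crio.runtime.workloads.management]
activation_annotation = "target.workload.openshift.io/management"
annotation_prefix = "resources.workload.openshift.io"
resources = { "cpushares" = 0, "cpuset" = "0-3" }

# /etc/kubernetes/openshift-workload-pinning
{
  "management": {
    "cpuset": "0-3"
  }
}
----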
diff --git a/snippets/pgt-cnf-topology-aware-lifecycle-manager-operator-troubleshooting.adoc b/snippets/pgt-cnf-topology-aware-lifecycle-manager-operator-troubleshooting.adoc new file mode 100644 index 000000000000..242f4a78f682 --- /dev/null +++ b/snippets/pgt-cnf-topology-aware-lifecycle-manager-operator-troubleshooting.adoc @@ -0,0 +1,35 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- fileName: DefaultCatsrc.yaml + remediationAction: inform + policyName: "operator-catsrc-policy" + metadata: + name: redhat-operators + spec: + displayName: Red Hat Operators Catalog + image: registry.example.com:5000/olm/redhat-operators:v{product-version} + updateStrategy: + registryPoll: + interval: 1h + status: + connectionState: + lastObservedState: READY +- fileName: DefaultCatsrc.yaml + remediationAction: inform + policyName: "operator-catsrc-policy" + metadata: + name: redhat-operators-v2 <1> + spec: + displayName: Red Hat Operators Catalog v2 <2> + image: registry.example.com:5000/olm/redhat-operators: <3> + updateStrategy: + registryPoll: + interval: 1h + status: + connectionState: + lastObservedState: READY +---- +<1> Update the name for the new configuration. +<2> Update the display name for the new configuration. +<3> Update the index image URL. This `fileName.spec.image` field overrides any configuration in the `DefaultCatsrc.yaml` file. diff --git a/snippets/pgt-cnf-topology-aware-lifecycle-manager-operator-update.adoc b/snippets/pgt-cnf-topology-aware-lifecycle-manager-operator-update.adoc new file mode 100644 index 000000000000..0ed0e2784190 --- /dev/null +++ b/snippets/pgt-cnf-topology-aware-lifecycle-manager-operator-update.adoc @@ -0,0 +1,32 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml,subs="attributes+"] +---- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "du-upgrade" + namespace: "ztp-group-du-sno" +spec: + bindingRules: + group-du-sno: "" + mcp: "master" + remediationAction: inform + sourceFiles: + - fileName: DefaultCatsrc.yaml + remediationAction: inform + policyName: "operator-catsrc-policy" + metadata: + name: redhat-operators + spec: + displayName: Red Hat Operators Catalog + image: registry.example.com:5000/olm/redhat-operators:v{product-version} <1> + updateStrategy: <2> + registryPoll: + interval: 1h + status: + connectionState: + lastObservedState: READY <3> +---- +<1> The index image URL contains the desired Operator images. If the index images are always pushed to the same image name and tag, this change is not needed. +<2> The `registryPoll.interval` field sets how frequently Operator Lifecycle Manager (OLM) polls the index image for new Operator versions. This change is not needed if a new index image tag is always pushed for y-stream and z-stream Operator updates. You can set `registryPoll.interval` to a shorter interval to expedite the update; however, shorter intervals increase computational load. To counteract this behavior, you can restore `registryPoll.interval` to the default value once the update is complete. +<3> Last observed state of the catalog connection. The `READY` value ensures that the `CatalogSource` policy is ready, indicating that the index pod is pulled and is running. This way, {cgu-operator} upgrades the Operators based on up-to-date policy compliance states.
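For symmetry with the `PolicyGenerator` sketch earlier, the equivalent temporary polling-interval override in `PolicyGenTemplate` form might look like this; again, `15m` is an arbitrary illustrative value:

[source,yaml]
----
- fileName: DefaultCatsrc.yaml
  policyName: "operator-catsrc-policy"
  metadata:
    name: redhat-operators
  spec:
    updateStrategy:
      registryPoll:
        interval: 15m # example only; restore the default after the update completes
----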
diff --git a/snippets/pgt-cnf-topology-aware-lifecycle-manager-pao-update.yaml b/snippets/pgt-cnf-topology-aware-lifecycle-manager-pao-update.yaml new file mode 100644 index 000000000000..37a4bf4fbdc3 --- /dev/null +++ b/snippets/pgt-cnf-topology-aware-lifecycle-manager-pao-update.yaml @@ -0,0 +1,9 @@ +- fileName: PaoSubscriptionNS.yaml + policyName: "subscriptions-policy" + complianceType: mustnothave +- fileName: PaoSubscriptionOperGroup.yaml + policyName: "subscriptions-policy" + complianceType: mustnothave +- fileName: PaoSubscription.yaml + policyName: "subscriptions-policy" + complianceType: mustnothave \ No newline at end of file diff --git a/snippets/pgt-cnf-topology-aware-lifecycle-manager-platform-update.adoc b/snippets/pgt-cnf-topology-aware-lifecycle-manager-platform-update.adoc new file mode 100644 index 000000000000..d4e6efcb36a6 --- /dev/null +++ b/snippets/pgt-cnf-topology-aware-lifecycle-manager-platform-update.adoc @@ -0,0 +1,48 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml,subs="attributes+"] +---- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "du-upgrade" + namespace: "ztp-group-du-sno" +spec: + bindingRules: + group-du-sno: "" + mcp: "master" + remediationAction: inform + sourceFiles: + - fileName: ImageSignature.yaml <1> + policyName: "platform-upgrade-prep" + binaryData: + ${DIGEST_ALGO}-${DIGEST_ENCODED}: ${SIGNATURE_BASE64} <2> + - fileName: DisconnectedICSP.yaml + policyName: "platform-upgrade-prep" + metadata: + name: disconnected-internal-icsp-for-ocp + spec: + repositoryDigestMirrors: <3> + - mirrors: + - quay-intern.example.com/ocp4/openshift-release-dev + source: quay.io/openshift-release-dev/ocp-release + - mirrors: + - quay-intern.example.com/ocp4/openshift-release-dev + source: quay.io/openshift-release-dev/ocp-v4.0-art-dev + - fileName: ClusterVersion.yaml <4> + policyName: "platform-upgrade" + metadata: + name: version + spec: + channel: "stable-{product-version}" + upstream: http://upgrade.example.com/images/upgrade-graph_stable-{product-version} + desiredUpdate: + version: {product-version}.4 + status: + history: + - version: {product-version}.4 + state: "Completed" +---- +<1> The `ConfigMap` CR contains the signature of the desired release image to update to. +<2> Shows the image signature of the desired {product-title} release. Get the signature from the `checksum-${OCP_RELEASE_NUMBER}.yaml` file you saved when following the procedures in the "Setting up the environment" section. +<3> Shows the mirror repository that contains the desired {product-title} image. Get the mirrors from the `imageContentSources.yaml` file that you saved when following the procedures in the "Setting up the environment" section. +<4> Shows the `ClusterVersion` CR to trigger the update. The `channel`, `upstream`, and `desiredUpdate` fields are all required for image pre-caching. diff --git a/snippets/pgt-deprecation-notice.adoc b/snippets/pgt-deprecation-notice.adoc new file mode 100644 index 000000000000..3a77abf30603 --- /dev/null +++ b/snippets/pgt-deprecation-notice.adoc @@ -0,0 +1,8 @@ +:_mod-docs-content-type: SNIPPET +[IMPORTANT] +==== +Using `PolicyGenTemplate` CRs to manage and deploy policies to managed clusters will be deprecated in an upcoming {product-title} release. +Equivalent and improved functionality is available using {rh-rhacm-first} and `PolicyGenerator` CRs.
+ +For more information about `PolicyGenerator` resources, see the {rh-rhacm} link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/{rh-rhacm-version}/html/governance/integrate-third-party-policy-controllers#policy-generator[Policy Generator] documentation. +==== diff --git a/snippets/pgt-group-du-sno-config-policy.yaml b/snippets/pgt-group-du-sno-config-policy.yaml new file mode 100644 index 000000000000..82471c83ec2e --- /dev/null +++ b/snippets/pgt-group-du-sno-config-policy.yaml @@ -0,0 +1,54 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: Policy +metadata: + name: group-du-ptp-config-policy + namespace: groups-sub + annotations: + policy.open-cluster-management.io/categories: CM Configuration Management + policy.open-cluster-management.io/controls: CM-2 Baseline Configuration + policy.open-cluster-management.io/standards: NIST SP 800-53 +spec: + remediationAction: inform + disabled: false + policy-templates: + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: group-du-ptp-config-policy-config + spec: + remediationAction: inform + severity: low + namespaceselector: + exclude: + - kube-* + include: + - '*' + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: ptp.openshift.io/v1 + kind: PtpConfig + metadata: + name: du-ptp-slave + namespace: openshift-ptp + spec: + recommend: + - match: + - nodeLabel: node-role.kubernetes.io/worker-du + priority: 4 + profile: slave + profile: + - interface: ens5f0 + name: slave + phc2sysOpts: -a -r -n 24 + ptp4lConf: | + [global] + # + # Default Data Set + # + twoStepFlag 1 + slaveOnly 0 + priority1 128 + priority2 128 + domainNumber 24 \ No newline at end of file diff --git a/snippets/pgt-sriov-fec-cnf-topology-aware-lifecycle-manager-operator-update.adoc b/snippets/pgt-sriov-fec-cnf-topology-aware-lifecycle-manager-operator-update.adoc new file mode 100644 index 000000000000..625521681c10 --- /dev/null +++ b/snippets/pgt-sriov-fec-cnf-topology-aware-lifecycle-manager-operator-update.adoc @@ -0,0 +1,32 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "du-upgrade" + namespace: "ztp-group-du-sno" +spec: + bindingRules: + group-du-sno: "" + mcp: "master" + remediationAction: inform + sourceFiles: + # ... 
+ - fileName: DefaultCatsrc.yaml + remediationAction: inform + policyName: "fec-catsrc-policy" + metadata: + name: certified-operators + spec: + displayName: Intel SRIOV-FEC Operator + image: registry.example.com:5000/olm/far-edge-sriov-fec:v4.10 + updateStrategy: + registryPoll: + interval: 10m + - fileName: AcceleratorsSubscription.yaml + policyName: "subscriptions-fec-policy" + spec: + channel: "stable" + source: certified-operators +---- diff --git a/snippets/pgt-using-ztp-to-update-source-crs.yaml b/snippets/pgt-using-ztp-to-update-source-crs.yaml new file mode 100644 index 000000000000..089d0a4bbd9a --- /dev/null +++ b/snippets/pgt-using-ztp-to-update-source-crs.yaml @@ -0,0 +1,15 @@ +- fileName: PerformanceProfile.yaml + policyName: "config-policy" + metadata: + name: openshift-node-performance-profile + spec: + cpu: + # These must be tailored for the specific hardware platform + isolated: "2-19,22-39" + reserved: "0-1,20-21" + hugepages: + defaultHugepagesSize: 1G + pages: + - size: 1G + count: 10 + globallyDisableIrqLoadBalancing: false diff --git a/snippets/pgt-ztp-adding-new-content-to-gitops-ztp-folder-structure.adoc b/snippets/pgt-ztp-adding-new-content-to-gitops-ztp-folder-structure.adoc new file mode 100644 index 000000000000..755defb2ceb4 --- /dev/null +++ b/snippets/pgt-ztp-adding-new-content-to-gitops-ztp-folder-structure.adoc @@ -0,0 +1,20 @@ +:_mod-docs-content-type: SNIPPET +[source,terminal] +---- +example +└── policygentemplates + ├── dev.yaml + ├── kustomization.yaml + ├── mec-edge-sno1.yaml + ├── sno.yaml + └── source-crs <1> + ├── PaoCatalogSource.yaml + ├── PaoSubscription.yaml + ├── custom-crs + | ├── apiserver-config.yaml + | └── disable-nic-lldp.yaml + └── elasticsearch + ├── ElasticsearchNS.yaml + └── ElasticsearchOperatorGroup.yaml +---- +<1> The `source-crs` subdirectory must be in the same directory as the `kustomization.yaml` file. 
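A minimal `kustomization.yaml` for the tree above might look like the following sketch. In the GitOps ZTP flow the `{policy-gen-crs}` files are typically listed as generators; the file names here are taken from the example tree and are otherwise placeholders:

[source,yaml]
----
generators:
  - dev.yaml
  - mec-edge-sno1.yaml
  - sno.yaml
----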
diff --git a/snippets/pgt-ztp-adding-new-content-to-gitops-ztp.adoc b/snippets/pgt-ztp-adding-new-content-to-gitops-ztp.adoc new file mode 100644 index 000000000000..1e00cfe1f846 --- /dev/null +++ b/snippets/pgt-ztp-adding-new-content-to-gitops-ztp.adoc @@ -0,0 +1,63 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "group-dev" + namespace: "ztp-clusters" +spec: + bindingRules: + dev: "true" + mcp: "master" + sourceFiles: + # These policies/CRs come from the internal container image + # Cluster Logging + - fileName: ClusterLogNS.yaml + remediationAction: inform + policyName: "group-dev-cluster-log-ns" + - fileName: ClusterLogOperGroup.yaml + remediationAction: inform + policyName: "group-dev-cluster-log-operator-group" + - fileName: ClusterLogSubscription.yaml + remediationAction: inform + policyName: "group-dev-cluster-log-sub" + # Local Storage Operator + - fileName: StorageNS.yaml + remediationAction: inform + policyName: "group-dev-lso-ns" + - fileName: StorageOperGroup.yaml + remediationAction: inform + policyName: "group-dev-lso-operator-group" + - fileName: StorageSubscription.yaml + remediationAction: inform + policyName: "group-dev-lso-sub" + # These are custom local policies that come from the source-crs directory in the git repo + # Performance Addon Operator + - fileName: PaoSubscriptionNS.yaml + remediationAction: inform + policyName: "group-dev-pao-ns" + - fileName: PaoSubscriptionCatalogSource.yaml + remediationAction: inform + policyName: "group-dev-pao-cat-source" + spec: + image: + - fileName: PaoSubscription.yaml + remediationAction: inform + policyName: "group-dev-pao-sub" + # Elasticsearch Operator + - fileName: elasticsearch/ElasticsearchNS.yaml <1> + remediationAction: inform + policyName: "group-dev-elasticsearch-ns" + - fileName: elasticsearch/ElasticsearchOperatorGroup.yaml + remediationAction: inform + policyName: "group-dev-elasticsearch-operator-group" + # Custom Resources + - fileName: custom-crs/apiserver-config.yaml <1> + remediationAction: inform + policyName: "group-dev-apiserver-config" + - fileName: custom-crs/disable-nic-lldp.yaml + remediationAction: inform + policyName: "group-dev-disable-nic-lldp" +---- +<1> Set `fileName` to include the relative path to the file from the `/source-crs` parent directory. diff --git a/snippets/pgt-ztp-configuring-hwevents-using-pgt-hardware-event.adoc b/snippets/pgt-ztp-configuring-hwevents-using-pgt-hardware-event.adoc new file mode 100644 index 000000000000..2a6905f418a9 --- /dev/null +++ b/snippets/pgt-ztp-configuring-hwevents-using-pgt-hardware-event.adoc @@ -0,0 +1,11 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- fileName: HardwareEvent.yaml <1> + policyName: "config-policy" + spec: + nodeSelector: {} + transportHost: "http://hw-event-publisher-service.openshift-bare-metal-events.svc.cluster.local:9043" + logLevel: "info" +---- +<1> Each baseboard management controller (BMC) requires a single `HardwareEvent` CR only.
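Because `transportHost` points at the `hw-event-publisher-service` in the `openshift-bare-metal-events` namespace, a quick existence check on the target cluster (a sketch, not part of the snippet) is:

[source,terminal]
----
$ oc get svc hw-event-publisher-service -n openshift-bare-metal-events
----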
diff --git a/snippets/pgt-ztp-configuring-hwevents-using-pgt.yaml b/snippets/pgt-ztp-configuring-hwevents-using-pgt.yaml new file mode 100644 index 000000000000..dc3f558cdfc7 --- /dev/null +++ b/snippets/pgt-ztp-configuring-hwevents-using-pgt.yaml @@ -0,0 +1,7 @@ +# Bare Metal Event Relay Operator +- fileName: BareMetalEventRelaySubscriptionNS.yaml + policyName: "subscriptions-policy" +- fileName: BareMetalEventRelaySubscriptionOperGroup.yaml + policyName: "subscriptions-policy" +- fileName: BareMetalEventRelaySubscription.yaml + policyName: "subscriptions-policy" \ No newline at end of file diff --git a/snippets/pgt-ztp-configuring-ptp-fast-events-amqp-transport.yaml b/snippets/pgt-ztp-configuring-ptp-fast-events-amqp-transport.yaml new file mode 100644 index 000000000000..c7d52280e160 --- /dev/null +++ b/snippets/pgt-ztp-configuring-ptp-fast-events-amqp-transport.yaml @@ -0,0 +1,7 @@ +- fileName: PtpOperatorConfigForEvent.yaml + policyName: "config-policy" + spec: + daemonNodeSelector: {} + ptpEventConfig: + enableEventPublisher: true + transportHost: "amqp://amq-router.amq-router.svc.cluster.local" \ No newline at end of file diff --git a/snippets/pgt-ztp-configuring-ptp-fast-events-amqp.yaml b/snippets/pgt-ztp-configuring-ptp-fast-events-amqp.yaml new file mode 100644 index 000000000000..35751e41386e --- /dev/null +++ b/snippets/pgt-ztp-configuring-ptp-fast-events-amqp.yaml @@ -0,0 +1,7 @@ +# AMQ Interconnect Operator for fast events +- fileName: AmqSubscriptionNS.yaml + policyName: "subscriptions-policy" +- fileName: AmqSubscriptionOperGroup.yaml + policyName: "subscriptions-policy" +- fileName: AmqSubscription.yaml + policyName: "subscriptions-policy" \ No newline at end of file diff --git a/snippets/pgt-ztp-configuring-ptp-fast-events-linuxptp.adoc b/snippets/pgt-ztp-configuring-ptp-fast-events-linuxptp.adoc new file mode 100644 index 000000000000..a9a8637ca4b8 --- /dev/null +++ b/snippets/pgt-ztp-configuring-ptp-fast-events-linuxptp.adoc @@ -0,0 +1,23 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- fileName: PtpConfigSlave.yaml <1> + policyName: "config-policy" + metadata: + name: "du-ptp-slave" + spec: + profile: + - name: "slave" + interface: "ens5f1" <2> + ptp4lOpts: "-2 -s --summary_interval -4" <3> + phc2sysOpts: "-a -r -m -n 24 -N 8 -R 16" <4> + ptpClockThreshold: <5> + holdOverTimeout: 30 # seconds + maxOffsetThreshold: 100 # nano seconds + minOffsetThreshold: -100 +---- +<1> Can be one of `PtpConfigMaster.yaml`, `PtpConfigSlave.yaml`, or `PtpConfigSlaveCvl.yaml` depending on your requirements. `PtpConfigSlaveCvl.yaml` configures `linuxptp` services for an Intel E810 Columbiaville NIC. For configurations based on `{policy-prefix}group-du-sno-ranGen.yaml` or `{policy-prefix}group-du-3node-ranGen.yaml`, use `PtpConfigSlave.yaml`. +<2> Device-specific interface name. +<3> You must append the `--summary_interval -4` value to `ptp4lOpts` in `.spec.sourceFiles.spec.profile` to enable PTP fast events. +<4> Required `phc2sysOpts` values. `-m` prints messages to `stdout`. The `linuxptp-daemon` `DaemonSet` parses the logs and generates Prometheus metrics. +<5> Optional. If the `ptpClockThreshold` stanza is not present, default values are used for the `ptpClockThreshold` fields. The stanza shows default `ptpClockThreshold` values. The `ptpClockThreshold` values control when PTP events are triggered after the PTP master clock is disconnected.
`holdOverTimeout` is the time value in seconds before the PTP clock event state changes to `FREERUN` when the PTP master clock is disconnected. The `maxOffsetThreshold` and `minOffsetThreshold` settings configure offset values in nanoseconds that compare against the values for `CLOCK_REALTIME` (`phc2sys`) or master offset (`ptp4l`). When the `ptp4l` or `phc2sys` offset value is outside this range, the PTP clock state is set to `FREERUN`. When the offset value is within this range, the PTP clock state is set to `LOCKED`. diff --git a/snippets/pgt-ztp-configuring-ptp-fast-events.yaml b/snippets/pgt-ztp-configuring-ptp-fast-events.yaml new file mode 100644 index 000000000000..4c928dd50c07 --- /dev/null +++ b/snippets/pgt-ztp-configuring-ptp-fast-events.yaml @@ -0,0 +1,7 @@ +- fileName: PtpOperatorConfigForEvent.yaml + policyName: "config-policy" + spec: + daemonNodeSelector: {} + ptpEventConfig: + enableEventPublisher: true + transportHost: http://ptp-event-publisher-service-NODE_NAME.openshift-ptp.svc.cluster.local:9043 diff --git a/snippets/pgt-ztp-creating-hwevents-amqp.yaml b/snippets/pgt-ztp-creating-hwevents-amqp.yaml new file mode 100644 index 000000000000..697ba36225c7 --- /dev/null +++ b/snippets/pgt-ztp-creating-hwevents-amqp.yaml @@ -0,0 +1,14 @@ +# AMQ Interconnect Operator for fast events +- fileName: AmqSubscriptionNS.yaml + policyName: "subscriptions-policy" +- fileName: AmqSubscriptionOperGroup.yaml + policyName: "subscriptions-policy" +- fileName: AmqSubscription.yaml + policyName: "subscriptions-policy" +# Bare Metal Event Relay Operator +- fileName: BareMetalEventRelaySubscriptionNS.yaml + policyName: "subscriptions-policy" +- fileName: BareMetalEventRelaySubscriptionOperGroup.yaml + policyName: "subscriptions-policy" +- fileName: BareMetalEventRelaySubscription.yaml + policyName: "subscriptions-policy" \ No newline at end of file diff --git a/snippets/pgt-ztp-example-single-node-cluster-validator.adoc b/snippets/pgt-ztp-example-single-node-cluster-validator.adoc new file mode 100644 index 000000000000..9d62ef4a9bb8 --- /dev/null +++ b/snippets/pgt-ztp-example-single-node-cluster-validator.adoc @@ -0,0 +1,27 @@ +.Example single-node cluster validator inform policy CR (group-du-sno-validator-ranGen.yaml) +[source,yaml] +---- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "group-du-sno-validator" <1> + namespace: "ztp-group" <2> +spec: + bindingRules: + group-du-sno: "" <3> + bindingExcludedRules: + ztp-done: "" <4> + mcp: "master" <5> + sourceFiles: + - fileName: validatorCRs/informDuValidator.yaml + remediationAction: inform <6> + policyName: "du-policy" <7> +---- +<1> The name of the `{policy-gen-crs}` object. This name is also used as part of the names +for the `placementBinding`, `placementRule`, and `policy` that are created in the requested `namespace`. +<2> This value should match the `namespace` used in the group `{policy-gen-crs}`. +<3> The `group-du-*` label defined in `bindingRules` must exist in the `SiteConfig` files. +<4> The label defined in `bindingExcludedRules` must be `ztp-done:`. The `ztp-done` label is used in coordination with the {cgu-operator-full}. +<5> `mcp` defines the `MachineConfigPool` object that is used in the source file `validatorCRs/informDuValidator.yaml`. It should be `master` for single node and three-node cluster deployments and `worker` for standard cluster deployments. +<6> Optional. The default value is `inform`. +<7> This value is used as part of the name for the generated {rh-rhacm} policy.
The generated validator policy for the single node example is `group-du-sno-validator-du-policy`. diff --git a/snippets/pgt-ztp-provisioning-lvm-storage-cluster.adoc b/snippets/pgt-ztp-provisioning-lvm-storage-cluster.adoc new file mode 100644 index 000000000000..bbf4870bd61e --- /dev/null +++ b/snippets/pgt-ztp-provisioning-lvm-storage-cluster.adoc @@ -0,0 +1,14 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- fileName: StorageLVMCluster.yaml + policyName: "lvms-config" + spec: + storage: + deviceClasses: + - name: vg1 + thinPoolConfig: + name: thin-pool-1 + sizePercent: 90 + overprovisionRatio: 10 +---- diff --git a/snippets/pgt-ztp-provisioning-lvm-storage-sub.yaml b/snippets/pgt-ztp-provisioning-lvm-storage-sub.yaml new file mode 100644 index 000000000000..aca70a7cfeff --- /dev/null +++ b/snippets/pgt-ztp-provisioning-lvm-storage-sub.yaml @@ -0,0 +1,6 @@ +- fileName: StorageLVMSubscriptionNS.yaml + policyName: subscription-policies +- fileName: StorageLVMSubscriptionOperGroup.yaml + policyName: subscription-policies +- fileName: StorageLVMSubscription.yaml + policyName: subscription-policies \ No newline at end of file diff --git a/snippets/pgt-ztp-provisioning-lvm-storage.adoc b/snippets/pgt-ztp-provisioning-lvm-storage.adoc new file mode 100644 index 000000000000..09eb8dd0f924 --- /dev/null +++ b/snippets/pgt-ztp-provisioning-lvm-storage.adoc @@ -0,0 +1,13 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml,subs="attributes+"] +---- +- fileName: StorageLVMOSubscriptionNS.yaml + policyName: subscription-policies +- fileName: StorageLVMOSubscriptionOperGroup.yaml + policyName: subscription-policies +- fileName: StorageLVMOSubscription.yaml + spec: + name: lvms-operator + channel: stable-{product-version} + policyName: subscription-policies +---- diff --git a/snippets/pgt-ztp-the-policygentemplate.adoc b/snippets/pgt-ztp-the-policygentemplate.adoc new file mode 100644 index 000000000000..cd7ed3777688 --- /dev/null +++ b/snippets/pgt-ztp-the-policygentemplate.adoc @@ -0,0 +1,64 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "common-latest" + namespace: "ztp-common" +spec: + bindingRules: + common: "true" <1> + du-profile: "latest" + sourceFiles: <2> + - fileName: SriovSubscriptionNS.yaml + policyName: "subscriptions-policy" + - fileName: SriovSubscriptionOperGroup.yaml + policyName: "subscriptions-policy" + - fileName: SriovSubscription.yaml + policyName: "subscriptions-policy" + - fileName: SriovOperatorStatus.yaml + policyName: "subscriptions-policy" + - fileName: PtpSubscriptionNS.yaml + policyName: "subscriptions-policy" + - fileName: PtpSubscriptionOperGroup.yaml + policyName: "subscriptions-policy" + - fileName: PtpSubscription.yaml + policyName: "subscriptions-policy" + - fileName: PtpOperatorStatus.yaml + policyName: "subscriptions-policy" + - fileName: ClusterLogNS.yaml + policyName: "subscriptions-policy" + - fileName: ClusterLogOperGroup.yaml + policyName: "subscriptions-policy" + - fileName: ClusterLogSubscription.yaml + policyName: "subscriptions-policy" + - fileName: ClusterLogOperatorStatus.yaml + policyName: "subscriptions-policy" + - fileName: StorageNS.yaml + policyName: "subscriptions-policy" + - fileName: StorageOperGroup.yaml + policyName: "subscriptions-policy" + - fileName: StorageSubscription.yaml + policyName: "subscriptions-policy" + - fileName: StorageOperatorStatus.yaml + policyName: "subscriptions-policy" + - fileName: DefaultCatsrc.yaml <3> + policyName: 
"config-policy" <4> + metadata: + name: redhat-operators-disconnected + spec: + displayName: disconnected-redhat-operators + image: registry.example.com:5000/disconnected-redhat-operators/disconnected-redhat-operator-index:v4.9 + - fileName: DisconnectedICSP.yaml + policyName: "config-policy" + spec: + repositoryDigestMirrors: + - mirrors: + - registry.example.com:5000 + source: registry.redhat.io +---- +<1> `common: "true"` applies the policies to all clusters with this label. +<2> Files listed under `sourceFiles` create the Operator policies for installed clusters. +<3> `DefaultCatsrc.yaml` configures the catalog source for the disconnected registry. +<4> `policyName: "config-policy"` configures Operator subscriptions. The `OperatorHub` CR disables the default and this CR replaces `redhat-operators` with a `CatalogSource` CR that points to the disconnected registry. diff --git a/snippets/pgt-ztp-using-pgt-to-configure-high-performance-mode.yaml b/snippets/pgt-ztp-using-pgt-to-configure-high-performance-mode.yaml new file mode 100644 index 000000000000..345edf98f134 --- /dev/null +++ b/snippets/pgt-ztp-using-pgt-to-configure-high-performance-mode.yaml @@ -0,0 +1,10 @@ +- fileName: PerformanceProfile.yaml + policyName: "config-policy" + metadata: + # ... + spec: + # ... + workloadHints: + realTime: true + highPowerConsumption: true + perPodPowerManagement: false diff --git a/snippets/pgt-ztp-using-pgt-to-configure-performance-mode.yaml b/snippets/pgt-ztp-using-pgt-to-configure-performance-mode.yaml new file mode 100644 index 000000000000..61d4b37ae810 --- /dev/null +++ b/snippets/pgt-ztp-using-pgt-to-configure-performance-mode.yaml @@ -0,0 +1,10 @@ +- fileName: PerformanceProfile.yaml + policyName: "config-policy" + metadata: + # ... + spec: + # ... + workloadHints: + realTime: true + highPowerConsumption: false + perPodPowerManagement: false diff --git a/snippets/pgt-ztp-using-pgt-to-configure-power-saving-mode.adoc b/snippets/pgt-ztp-using-pgt-to-configure-power-saving-mode.adoc new file mode 100644 index 000000000000..0e33606979a2 --- /dev/null +++ b/snippets/pgt-ztp-using-pgt-to-configure-power-saving-mode.adoc @@ -0,0 +1,19 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- fileName: PerformanceProfile.yaml + policyName: "config-policy" + metadata: + # ... + spec: + # ... + workloadHints: + realTime: true + highPowerConsumption: false + perPodPowerManagement: true + # ... + additionalKernelArgs: + - # ... + - "cpufreq.default_governor=schedutil" <1> +---- +<1> The `schedutil` governor is recommended, however, other governors that can be used include `ondemand` and `powersave`. diff --git a/snippets/pgt-ztp-using-pgt-to-maximize-power-saving-mode.adoc b/snippets/pgt-ztp-using-pgt-to-maximize-power-saving-mode.adoc new file mode 100644 index 000000000000..34bedf22905e --- /dev/null +++ b/snippets/pgt-ztp-using-pgt-to-maximize-power-saving-mode.adoc @@ -0,0 +1,14 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- fileName: TunedPerformancePatch.yaml + policyName: "config-policy" + spec: + profile: + - name: performance-patch + data: | + # ... + [sysfs] + /sys/devices/system/cpu/intel_pstate/max_perf_pct= <1> +---- +<1> The `max_perf_pct` controls the maximum frequency the `cpufreq` driver is allowed to set as a percentage of the maximum supported CPU frequency. This value applies to all CPUs. You can check the maximum supported frequency in `/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq`. 
As a starting point, you can use a percentage that caps all CPUs at the `All Cores Turbo` frequency, which is the maximum frequency that all cores run at when all cores are fully occupied. diff --git a/snippets/pgt-ztp-worker-node-preparing-policies.adoc b/snippets/pgt-ztp-worker-node-preparing-policies.adoc new file mode 100644 index 000000000000..3abad4dbc6e1 --- /dev/null +++ b/snippets/pgt-ztp-worker-node-preparing-policies.adoc @@ -0,0 +1,80 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "example-sno-workers" + namespace: "example-sno" +spec: + bindingRules: + sites: "example-sno" <1> + mcp: "worker" <2> + sourceFiles: + - fileName: MachineConfigGeneric.yaml <3> + policyName: "config-policy" + metadata: + labels: + machineconfiguration.openshift.io/role: worker + name: enable-workload-partitioning + spec: + config: + storage: + files: + - contents: + source: data:text/plain;charset=utf-8;base64,W2NyaW8ucnVudGltZS53b3JrbG9hZHMubWFuYWdlbWVudF0KYWN0aXZhdGlvbl9hbm5vdGF0aW9uID0gInRhcmdldC53b3JrbG9hZC5vcGVuc2hpZnQuaW8vbWFuYWdlbWVudCIKYW5ub3RhdGlvbl9wcmVmaXggPSAicmVzb3VyY2VzLndvcmtsb2FkLm9wZW5zaGlmdC5pbyIKcmVzb3VyY2VzID0geyAiY3B1c2hhcmVzIiA9IDAsICJjcHVzZXQiID0gIjAtMyIgfQo= + mode: 420 + overwrite: true + path: /etc/crio/crio.conf.d/01-workload-partitioning + user: + name: root + - contents: + source: data:text/plain;charset=utf-8;base64,ewogICJtYW5hZ2VtZW50IjogewogICAgImNwdXNldCI6ICIwLTMiCiAgfQp9Cg== + mode: 420 + overwrite: true + path: /etc/kubernetes/openshift-workload-pinning + user: + name: root + - fileName: PerformanceProfile.yaml + policyName: "config-policy" + metadata: + name: openshift-worker-node-performance-profile + spec: + cpu: <4> + isolated: "4-47" + reserved: "0-3" + hugepages: + defaultHugepagesSize: 1G + pages: + - size: 1G + count: 32 + realTimeKernel: + enabled: true + - fileName: TunedPerformancePatch.yaml + policyName: "config-policy" + metadata: + name: performance-patch-worker + spec: + profile: + - name: performance-patch-worker + data: | + [main] + summary=Configuration changes profile inherited from performance created tuned + include=openshift-node-performance-openshift-worker-node-performance-profile + [bootloader] + cmdline_crash=nohz_full=4-47 <5> + [sysctl] + kernel.timer_migration=1 + [scheduler] + group.ice-ptp=0:f:10:*:ice-ptp.* + [service] + service.stalld=start,enable + service.chronyd=stop,disable + recommend: + - profile: performance-patch-worker +---- +<1> The policies are applied to all clusters with this label. +<2> The `mcp` field must be set to `worker`. +<3> This generic `MachineConfig` CR is used to configure workload partitioning on the worker node. +<4> The `cpu.isolated` and `cpu.reserved` fields must be configured for each particular hardware platform. +<5> The `cmdline_crash` CPU set must match the `cpu.isolated` set in the `PerformanceProfile` section. diff --git a/snippets/ptp-amq-interconnect-eol.adoc b/snippets/ptp-amq-interconnect-eol.adoc index d2810115bf88..4875975de04b 100644 --- a/snippets/ptp-amq-interconnect-eol.adoc +++ b/snippets/ptp-amq-interconnect-eol.adoc @@ -1,3 +1,4 @@ +:_mod-docs-content-type: SNIPPET [NOTE] ==== HTTP transport is the default transport for PTP and bare-metal events.
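For quick reference, the following minimal sketch contrasts the two `transportHost` forms used in the snippets above. It is not a complete `PtpOperatorConfigForEvent` entry; the values are taken from the earlier snippets, and the AMQP URL reuses the illustrative `amq-router` name and namespace:

[source,yaml]
----
ptpEventConfig:
  enableEventPublisher: true
  # Default HTTP transport; NODE_NAME is a placeholder for the node name:
  transportHost: http://ptp-event-publisher-service-NODE_NAME.openshift-ptp.svc.cluster.local:9043
  # AMQP transport by way of AMQ Interconnect; requires the AMQ Interconnect Operator:
  #transportHost: "amqp://amq-router.amq-router.svc.cluster.local"
----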
diff --git a/snippets/technology-preview.adoc b/snippets/technology-preview.adoc index 9c1acea0a89b..a91b9795f01d 100644 --- a/snippets/technology-preview.adoc +++ b/snippets/technology-preview.adoc @@ -9,4 +9,4 @@ For more information about the support scope of Red Hat Technology Preview features, see link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview Features Support Scope]. ==== // Undefine {FeatureName} attribute, so that any mistakes are easily spotted -:!FeatureName: +:!FeatureName: The Alibaba Cloud installation with Assisted Installer diff --git a/snippets/terraform-modification-disclaimer.adoc b/snippets/terraform-modification-disclaimer.adoc index 3e7911ac7de7..f5526a53b30b 100644 --- a/snippets/terraform-modification-disclaimer.adoc +++ b/snippets/terraform-modification-disclaimer.adoc @@ -1,6 +1,6 @@ // Module included in the following assemblies: // -// * rosa_install_access_delete_clusters/terraform/rosa-sts-creating-a-cluster-quickly-terraform.adoc +// * rosa_install_access_delete_clusters/terraform/rosa-classic-creating-a-cluster-quickly-terraform.adoc // * rosa_planning/rosa-understanding-terraform.adoc :_mod-docs-content-type: SNIPPET diff --git a/snippets/ztp-creating-hwevents-amqp-hardware-event.adoc b/snippets/ztp-creating-hwevents-amqp-hardware-event.adoc new file mode 100644 index 000000000000..bd91a8084378 --- /dev/null +++ b/snippets/ztp-creating-hwevents-amqp-hardware-event.adoc @@ -0,0 +1,10 @@ +:_mod-docs-content-type: SNIPPET +[source,yaml] +---- +- path: HardwareEvent.yaml + patches: + nodeSelector: {} + transportHost: "amqp://<amq_interconnect_name>.<amq_interconnect_namespace>.svc.cluster.local" <1> + logLevel: "info" +---- +<1> The `transportHost` URL is composed of the existing AMQ Interconnect CR `name` and `namespace`. For example, in `transportHost: "amqp://amq-router.amq-router.svc.cluster.local"`, the AMQ Interconnect `name` and `namespace` are both set to `amq-router`.
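To make the composition concrete, the following is a minimal, hypothetical AMQ Interconnect CR that would yield the example URL above. The `apiVersion` and `spec` fields are assumptions based on the upstream Interconnect CRD, not values taken from this change:

[source,yaml]
----
apiVersion: interconnectedcloud.github.io/v1alpha1 # assumed CRD group/version
kind: Interconnect
metadata:
  name: amq-router       # supplies the first segment of the transportHost URL
  namespace: amq-router  # supplies the second segment of the transportHost URL
spec:
  deploymentPlan:
    size: 1              # illustrative single-router deployment
    role: interior
----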
diff --git a/snippets/ztp-specifying-nics-in-pgt-hub-cluster-templates.yaml b/snippets/ztp-specifying-nics-in-pgt-hub-cluster-templates.yaml new file mode 100644 index 000000000000..4f6e605f6f82 --- /dev/null +++ b/snippets/ztp-specifying-nics-in-pgt-hub-cluster-templates.yaml @@ -0,0 +1,80 @@ +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: group-du-sno-pgt + namespace: ztp-group +spec: + bindingRules: + # These policies will correspond to all clusters with these labels + group-du-sno-zone: "zone-1" + hardware-type: "hardware-type-1" + mcp: "master" + sourceFiles: + - fileName: ClusterLogForwarder.yaml # wave 10 + policyName: "group-du-sno-cfg-policy" + spec: + outputs: '{{hub fromConfigMap "" "group-zones-configmap" (printf "%s-cluster-log-fwd-outputs" (index .ManagedClusterLabels "group-du-sno-zone")) | toLiteral hub}}' + pipelines: '{{hub fromConfigMap "" "group-zones-configmap" (printf "%s-cluster-log-fwd-pipelines" (index .ManagedClusterLabels "group-du-sno-zone")) | toLiteral hub}}' + + - fileName: PerformanceProfile.yaml # wave 10 + policyName: "group-du-sno-cfg-policy" + metadata: + name: openshift-node-performance-profile + spec: + additionalKernelArgs: + - rcupdate.rcu_normal_after_boot=0 + - vfio_pci.enable_sriov=1 + - vfio_pci.disable_idle_d3=1 + - efi=runtime + cpu: + isolated: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-cpu-isolated" (index .ManagedClusterLabels "hardware-type")) hub}}' + reserved: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-cpu-reserved" (index .ManagedClusterLabels "hardware-type")) hub}}' + hugepages: + defaultHugepagesSize: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-hugepages-default" (index .ManagedClusterLabels "hardware-type")) hub}}' + pages: + - size: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-hugepages-size" (index .ManagedClusterLabels "hardware-type")) hub}}' + count: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-hugepages-count" (index .ManagedClusterLabels "hardware-type")) | toInt hub}}' + realTimeKernel: + enabled: true + + - fileName: SriovNetwork.yaml # wave 100 + policyName: "group-du-sno-sriov-policy" + metadata: + name: sriov-nw-du-fh + spec: + resourceName: du_fh + vlan: '{{hub fromConfigMap "" "site-data-configmap" (printf "%s-sriov-network-vlan-1" .ManagedClusterName) | toInt hub}}' + + - fileName: SriovNetworkNodePolicy.yaml # wave 100 + policyName: "group-du-sno-sriov-policy" + metadata: + name: sriov-nnp-du-fh + spec: + deviceType: netdevice + isRdma: false + nicSelector: + pfNames: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-sriov-node-policy-pfNames-1" (index .ManagedClusterLabels "hardware-type")) | toLiteral hub}}' + numVfs: 8 + priority: 10 + resourceName: du_fh + + - fileName: SriovNetwork.yaml # wave 100 + policyName: "group-du-sno-sriov-policy" + metadata: + name: sriov-nw-du-mh + spec: + resourceName: du_mh + vlan: '{{hub fromConfigMap "" "site-data-configmap" (printf "%s-sriov-network-vlan-2" .ManagedClusterName) | toInt hub}}' + + - fileName: SriovNetworkNodePolicy.yaml # wave 100 + policyName: "group-du-sno-sriov-policy" + metadata: + name: sriov-nnp-du-mh + spec: + deviceType: netdevice + isRdma: false + nicSelector: + pfNames: '{{hub fromConfigMap "" "group-hardware-types-configmap" (printf "%s-sriov-node-policy-pfNames-2" (index .ManagedClusterLabels "hardware-type")) | toLiteral hub}}' + numVfs: 8 + priority: 10 + resourceName: du_mh \ No
newline at end of file diff --git a/snippets/ztp-the-policygenerator-single.yaml b/snippets/ztp-the-policygenerator-single.yaml new file mode 100644 index 000000000000..375cf3d09751 --- /dev/null +++ b/snippets/ztp-the-policygenerator-single.yaml @@ -0,0 +1,152 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: PolicyGenerator +metadata: + name: group-du-sno +placementBindingDefaults: + name: group-du-sno-placement-binding +policyDefaults: + namespace: ztp-group + placement: + labelSelector: + matchExpressions: + - key: group-du-sno + operator: Exists + remediationAction: inform + severity: low + namespaceSelector: + exclude: + - kube-* + include: + - '*' + evaluationInterval: + compliant: 10m + noncompliant: 10s +policies: + - name: group-du-sno-config-policy + policyAnnotations: + ran.openshift.io/ztp-deploy-wave: '10' + manifests: + - path: source-crs/PtpConfigSlave-MCP-master.yaml + patches: + - metadata: null + name: du-ptp-slave + namespace: openshift-ptp + annotations: + ran.openshift.io/ztp-deploy-wave: '10' + spec: + profile: + - name: slave + interface: $interface + ptp4lOpts: '-2 -s' + phc2sysOpts: '-a -r -n 24' + ptpSchedulingPolicy: SCHED_FIFO + ptpSchedulingPriority: 10 + ptpSettings: + logReduce: 'true' + ptp4lConf: | + [global] + # + # Default Data Set + # + twoStepFlag 1 + slaveOnly 1 + priority1 128 + priority2 128 + domainNumber 24 + #utc_offset 37 + clockClass 255 + clockAccuracy 0xFE + offsetScaledLogVariance 0xFFFF + free_running 0 + freq_est_interval 1 + dscp_event 0 + dscp_general 0 + dataset_comparison G.8275.x + G.8275.defaultDS.localPriority 128 + # + # Port Data Set + # + logAnnounceInterval -3 + logSyncInterval -4 + logMinDelayReqInterval -4 + logMinPdelayReqInterval -4 + announceReceiptTimeout 3 + syncReceiptTimeout 0 + delayAsymmetry 0 + fault_reset_interval -4 + neighborPropDelayThresh 20000000 + masterOnly 0 + G.8275.portDS.localPriority 128 + # + # Run time options + # + assume_two_step 0 + logging_level 6 + path_trace_enabled 0 + follow_up_info 0 + hybrid_e2e 0 + inhibit_multicast_service 0 + net_sync_monitor 0 + tc_spanning_tree 0 + tx_timestamp_timeout 50 + unicast_listen 0 + unicast_master_table 0 + unicast_req_duration 3600 + use_syslog 1 + verbose 0 + summary_interval 0 + kernel_leap 1 + check_fup_sync 0 + clock_class_threshold 7 + # + # Servo Options + # + pi_proportional_const 0.0 + pi_integral_const 0.0 + pi_proportional_scale 0.0 + pi_proportional_exponent -0.3 + pi_proportional_norm_max 0.7 + pi_integral_scale 0.0 + pi_integral_exponent 0.4 + pi_integral_norm_max 0.3 + step_threshold 2.0 + first_step_threshold 0.00002 + max_frequency 900000000 + clock_servo pi + sanity_freq_limit 200000000 + ntpshm_segment 0 + # + # Transport options + # + transportSpecific 0x0 + ptp_dst_mac 01:1B:19:00:00:00 + p2p_dst_mac 01:80:C2:00:00:0E + udp_ttl 1 + udp6_scope 0x0E + uds_address /var/run/ptp4l + # + # Default interface options + # + clock_type OC + network_transport L2 + delay_mechanism E2E + time_stamping hardware + tsproc_mode filter + delay_filter moving_median + delay_filter_length 10 + egressLatency 0 + ingressLatency 0 + boundary_clock_jbod 0 + # + # Clock description + # + productDescription ;; + revisionData ;; + manufacturerIdentity 00:00:00 + userDescription ; + timeSource 0xA0 + recommend: + - profile: slave + priority: 4 + match: + - nodeLabel: node-role.kubernetes.io/master diff --git a/snippets/ztp-the-policygentemplate-single.yaml b/snippets/ztp-the-policygentemplate-single.yaml new file mode 100644 index 000000000000..728f2e78d62c 
--- /dev/null +++ b/snippets/ztp-the-policygentemplate-single.yaml @@ -0,0 +1,20 @@ +apiVersion: ran.openshift.io/v1 +kind: PolicyGenTemplate +metadata: + name: "group-du-sno" + namespace: "ztp-group" +spec: + bindingRules: + group-du-sno: "" + mcp: "master" + sourceFiles: + - fileName: PtpConfigSlave.yaml + policyName: "config-policy" + metadata: + name: "du-ptp-slave" + spec: + profile: + - name: "slave" + interface: "ens5f0" + ptp4lOpts: "-2 -s --summary_interval -4" + phc2sysOpts: "-a -r -n 24" \ No newline at end of file diff --git a/snippets/ztp_99-sync-time-once-master.yaml b/snippets/ztp_99-sync-time-once-master.yaml index 0b5401585268..5195ba9fb877 100644 --- a/snippets/ztp_99-sync-time-once-master.yaml +++ b/snippets/ztp_99-sync-time-once-master.yaml @@ -13,7 +13,8 @@ spec: - contents: | [Unit] Description=Sync time once - After=network.service + After=network-online.target + Wants=network-online.target [Service] Type=oneshot TimeoutStartSec=300 diff --git a/snippets/ztp_99-sync-time-once-worker.yaml b/snippets/ztp_99-sync-time-once-worker.yaml index 59c6b9a6467d..2675a637ed2e 100644 --- a/snippets/ztp_99-sync-time-once-worker.yaml +++ b/snippets/ztp_99-sync-time-once-worker.yaml @@ -13,7 +13,7 @@ spec: - contents: | [Unit] Description=Sync time once - After=network.service + After=network-online.target [Service] Type=oneshot TimeoutStartSec=300 diff --git a/snippets/ztp_SriovOperatorConfig.yaml b/snippets/ztp_SriovOperatorConfig.yaml index fafae3ee7df6..270f7383e30b 100644 --- a/snippets/ztp_SriovOperatorConfig.yaml +++ b/snippets/ztp_SriovOperatorConfig.yaml @@ -3,7 +3,8 @@ kind: SriovOperatorConfig metadata: name: default namespace: openshift-sriov-network-operator - annotations: {} + annotations: + ran.openshift.io/ztp-deploy-wave: "10" spec: configDaemonNodeSelector: "node-role.kubernetes.io/$mcp": "" @@ -20,6 +21,8 @@ spec: # openshift.io/: "1" # requests: # openshift.io/: "1" - enableInjector: true - enableOperatorWebhook: true + enableInjector: false + enableOperatorWebhook: false + # Disable drain is needed for single-node OpenShift. + disableDrain: true logLevel: 0 diff --git a/snippets/ztp_TunedPerformancePatch.yaml b/snippets/ztp_TunedPerformancePatch.yaml index f7e8d90732e1..5e47d72b63f5 100644 --- a/snippets/ztp_TunedPerformancePatch.yaml +++ b/snippets/ztp_TunedPerformancePatch.yaml @@ -3,7 +3,8 @@ kind: Tuned metadata: name: performance-patch namespace: openshift-cluster-node-tuning-operator - annotations: {} + annotations: + ran.openshift.io/ztp-deploy-wave: "10" spec: profile: - name: performance-patch @@ -16,11 +17,10 @@ spec: [main] summary=Configuration changes profile inherited from performance created tuned include=openshift-node-performance-openshift-node-performance-profile - [sysctl] - kernel.timer_migration=1 [scheduler] group.ice-ptp=0:f:10:*:ice-ptp.* group.ice-gnss=0:f:10:*:ice-gnss.* + group.ice-dplls=0:f:10:*:ice-dplls.* [service] service.stalld=start,enable service.chronyd=stop,disable diff --git a/snippets/ztp_example-sno.yaml b/snippets/ztp_example-sno.yaml index 1c1f3ecc07be..6d7fec894272 100644 --- a/snippets/ztp_example-sno.yaml +++ b/snippets/ztp_example-sno.yaml @@ -1,6 +1,6 @@ # example-node1-bmh-secret & assisted-deployment-pull-secret need to be created under same namespace example-sno --- -apiVersion: ran.openshift.io/v2 +apiVersion: ran.openshift.io/v1 kind: SiteConfig metadata: name: "example-sno" @@ -12,96 +12,142 @@ spec: clusterImageSetNameRef: "openshift-4.10" sshPublicKey: "ssh-rsa AAAA..." 
clusters: - - clusterName: "example-sno" - networkType: "OVNKubernetes" - # installConfigOverrides is a generic way of passing install-config - # parameters through the siteConfig. The 'capabilities' field configures - # the composable openshift feature. In this 'capabilities' setting, we - # remove all but the marketplace component from the optional set of - # components. - # Notes: - # - OperatorLifecycleManager is needed for 4.15 and later - # - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier - installConfigOverrides: "{\"capabilities\":{\"baselineCapabilitySet\": \"None\", \"additionalEnabledCapabilities\": [ \"OperatorLifecycleManager\", \"NodeTuning\" ] }}" - # It is strongly recommended to include crun manifests as part of the additional install-time manifests for 4.13+. - # The crun manifests can be obtained from source-crs/optional-extra-manifest/ and added to the git repo ie.sno-extra-manifest. - # extraManifestPath: sno-extra-manifest - clusterLabels: - # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples - du-profile: "latest" - # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates: - # ../policygentemplates/common-ranGen.yaml will apply to all clusters with 'common: true' - common: true - # ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""' - group-du-sno: "" - # ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"' - # Normally this should match or contain the cluster name so it only applies to a single cluster - sites: "example-sno" - clusterNetwork: - - cidr: 1001:1::/48 - hostPrefix: 64 - machineNetwork: - - cidr: 1111:2222:3333:4444::/64 - serviceNetwork: - - 1001:2::/112 - additionalNTPSources: - - 1111:2222:3333:4444::2 - # Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate - # please see Workload Partitioning Feature for a complete guide. - cpuPartitioningMode: AllNodes - # Optionally; This can be used to override the KlusterletAddonConfig that is created for this cluster: - #crTemplates: - # KlusterletAddonConfig: "KlusterletAddonConfigOverride.yaml" - nodes: - - hostName: "example-node1.example.com" - role: "master" - # Optionally; This can be used to configure desired BIOS setting on a host: - #biosConfigRef: - # filePath: "example-hw.profile" - bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1" - bmcCredentialsName: - name: "example-node1-bmh-secret" - bootMACAddress: "AA:BB:CC:DD:EE:11" - # Use UEFISecureBoot to enable secure boot - bootMode: "UEFI" - rootDeviceHints: - wwn: "0x11111000000asd123" - # example of diskPartition below is used for image registry (check ImageRegistry.md for more details), but it's not limited to this use case - # diskPartition: - # - device: /dev/disk/by-id/wwn-0x11111000000asd123 # match rootDeviceHints - # partitions: - # - mount_point: /var/imageregistry - # size: 102500 - # start: 344844 - - nodeNetwork: + - clusterName: "example-sno" + networkType: "OVNKubernetes" + # installConfigOverrides is a generic way of passing install-config + # parameters through the siteConfig. The 'capabilities' field configures + # the composable openshift feature. In this 'capabilities' setting, we + # remove all but the marketplace component from the optional set of + # components. 
+ # Notes: + # - OperatorLifecycleManager is needed for 4.15 and later + # - NodeTuning is needed for 4.13 and later, not for 4.12 and earlier + # - Ingress is needed for 4.16 and later + installConfigOverrides: | + { + "capabilities": { + "baselineCapabilitySet": "None", + "additionalEnabledCapabilities": [ + "NodeTuning", + "OperatorLifecycleManager", + "Ingress" + ] + } + } + # It is strongly recommended to include crun manifests as part of the additional install-time manifests for 4.13+. + # The crun manifests can be obtained from source-crs/optional-extra-manifest/ and added to the git repo ie.sno-extra-manifest. + # extraManifestPath: sno-extra-manifest + clusterLabels: + # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples + du-profile: "latest" + # These example cluster labels correspond to the bindingRules in the PolicyGenTemplate examples in ../policygentemplates: + # ../policygentemplates/common-ranGen.yaml will apply to all clusters with 'common: true' + common: true + # ../policygentemplates/group-du-sno-ranGen.yaml will apply to all clusters with 'group-du-sno: ""' + group-du-sno: "" + # ../policygentemplates/example-sno-site.yaml will apply to all clusters with 'sites: "example-sno"' + # Normally this should match or contain the cluster name so it only applies to a single cluster + sites: "example-sno" + clusterNetwork: + - cidr: 1001:1::/48 + hostPrefix: 64 + machineNetwork: + - cidr: 1111:2222:3333:4444::/64 + serviceNetwork: + - 1001:2::/112 + additionalNTPSources: + - 1111:2222:3333:4444::2 + # Initiates the cluster for workload partitioning. Setting specific reserved/isolated CPUSets is done via PolicyTemplate + # please see Workload Partitioning Feature for a complete guide. + cpuPartitioningMode: AllNodes + # Optionally; This can be used to override the KlusterletAddonConfig that is created for this cluster: + #crTemplates: + # KlusterletAddonConfig: "KlusterletAddonConfigOverride.yaml" + nodes: + - hostName: "example-node1.example.com" + role: "master" + # Optionally; This can be used to configure desired BIOS setting on a host: + #biosConfigRef: + # filePath: "example-hw.profile" + bmcAddress: "idrac-virtualmedia+https://[1111:2222:3333:4444::bbbb:1]/redfish/v1/Systems/System.Embedded.1" + bmcCredentialsName: + name: "example-node1-bmh-secret" + bootMACAddress: "AA:BB:CC:DD:EE:11" + # Use UEFISecureBoot to enable secure boot + bootMode: "UEFI" + rootDeviceHints: + deviceName: "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0" + # disk partition at `/var/lib/containers` with ignitionConfigOverride. Some values must be updated.
See DiskPartitionContainer.md for more details + ignitionConfigOverride: | + { + "ignition": { + "version": "3.2.0" + }, + "storage": { + "disks": [ + { + "device": "/dev/disk/by-id/wwn-0x6b07b250ebb9d0002a33509f24af1f62", + "partitions": [ + { + "label": "var-lib-containers", + "sizeMiB": 0, + "startMiB": 250000 + } + ], + "wipeTable": false + } + ], + "filesystems": [ + { + "device": "/dev/disk/by-partlabel/var-lib-containers", + "format": "xfs", + "mountOptions": [ + "defaults", + "prjquota" + ], + "path": "/var/lib/containers", + "wipeFilesystem": true + } + ] + }, + "systemd": { + "units": [ + { + "contents": "# Generated by Butane\n[Unit]\nRequires=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\nAfter=systemd-fsck@dev-disk-by\\x2dpartlabel-var\\x2dlib\\x2dcontainers.service\n\n[Mount]\nWhere=/var/lib/containers\nWhat=/dev/disk/by-partlabel/var-lib-containers\nType=xfs\nOptions=defaults,prjquota\n\n[Install]\nRequiredBy=local-fs.target", + "enabled": true, + "name": "var-lib-containers.mount" + } + ] + } + } + nodeNetwork: + interfaces: + - name: eno1 + macAddress: "AA:BB:CC:DD:EE:11" + config: interfaces: - name: eno1 - macAddress: "AA:BB:CC:DD:EE:11" - config: - interfaces: - - name: eno1 - type: ethernet - state: up - ipv4: - enabled: false - ipv6: - enabled: true - address: - # For SNO sites with static IP addresses, the node-specific, - # API and Ingress IPs should all be the same and configured on - # the interface - - ip: 1111:2222:3333:4444::aaaa:1 - prefix-length: 64 - dns-resolver: - config: - search: - - example.com - server: - - 1111:2222:3333:4444::2 - routes: - config: - - destination: ::/0 - next-hop-interface: eno1 - next-hop-address: 1111:2222:3333:4444::1 - table-id: 254 + type: ethernet + state: up + ipv4: + enabled: false + ipv6: + enabled: true + address: + # For SNO sites with static IP addresses, the node-specific, + # API and Ingress IPs should all be the same and configured on + # the interface + - ip: 1111:2222:3333:4444::aaaa:1 + prefix-length: 64 + dns-resolver: + config: + search: + - example.com + server: + - 1111:2222:3333:4444::2 + routes: + config: + - destination: ::/0 + next-hop-interface: eno1 + next-hop-address: 1111:2222:3333:4444::1 + table-id: 254 \ No newline at end of file diff --git a/storage/container_storage_interface/osd-persistent-storage-aws-efs-csi.adoc b/storage/container_storage_interface/osd-persistent-storage-aws-efs-csi.adoc index 4e68c0219b6d..acdccfad9fbf 100644 --- a/storage/container_storage_interface/osd-persistent-storage-aws-efs-csi.adoc +++ b/storage/container_storage_interface/osd-persistent-storage-aws-efs-csi.adoc @@ -16,7 +16,7 @@ This procedure is specific to the link:https://github.com/openshift/aws-efs-csi- {product-title} is capable of provisioning persistent volumes (PVs) using the link:https://github.com/openshift/aws-efs-csi-driver[AWS EFS CSI driver]. -Familiarity with link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.15/html-single/storage/index#persistent-storage-overview_understanding-persistent-storage[persistent storage] and link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.15/html-single/storage/index#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver. 
+Familiarity with link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-overview_understanding-persistent-storage[persistent storage] and link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver. After installing the AWS EFS CSI Driver Operator, {product-title} installs the AWS EFS CSI Operator and the AWS EFS CSI driver by default in the `openshift-cluster-csi-drivers` namespace. This allows the AWS EFS CSI Driver Operator to create CSI-provisioned PVs that mount to AWS EFS assets. @@ -87,5 +87,5 @@ include::modules/persistent-storage-csi-olm-operator-uninstall.adoc[leveloffset= [role="_additional-resources"] == Additional resources -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.15/html-single/storage/index#persistent-storage-csi[Configuring CSI volumes] +* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-csi[Configuring CSI volumes] diff --git a/storage/container_storage_interface/persistent-storage-csi-smb-cifs.adoc b/storage/container_storage_interface/persistent-storage-csi-smb-cifs.adoc new file mode 100644 index 000000000000..ab50f5ed9cee --- /dev/null +++ b/storage/container_storage_interface/persistent-storage-csi-smb-cifs.adoc @@ -0,0 +1,40 @@ +:_mod-docs-content-type: ASSEMBLY +[id="persistent-storage-csi-smb-cifs"] += CIFS/SMB CSI Driver Operator +include::_attributes/common-attributes.adoc[] +:context: persistent-storage-csi-smb-cifs + +toc::[] + +{product-title} is capable of provisioning persistent volumes (PVs) with a Container Storage Interface (CSI) driver for Common Internet File System (CIFS) dialect/Server Message Block (SMB) protocol. + +:FeatureName: CIFS/SMB CSI Driver Operator +include::snippets/technology-preview.adoc[leveloffset=+1] + +Familiarity with xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[persistent storage] and xref:../../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver. + +After installing the CIFS/SMB CSI Driver Operator, {product-title} installs corresponding pods for the Operator and driver in the `openshift-cluster-csi-drivers` namespace by default. This allows the CIFS/SMB CSI Driver to create CSI-provisioned persistent volumes (PVs) that mount to CIFS/SMB shares. + +* The _CIFS/SMB CSI Driver Operator_, after being installed, does not create a storage class by default for creating persistent volume claims (PVCs). However, xref:../../storage/container_storage_interface/persistent-storage-csi-smb-cifs.adoc#persistent-storage-csi-smb-cifs-provision-dynamic_persistent-storage-csi-smb-cifs[you can manually create the CIFS/SMB `StorageClass` for dynamic provisioning], as illustrated by the sketch after this list. The CIFS/SMB CSI Driver Operator supports dynamic volume provisioning by allowing storage volumes to be created on-demand. +This eliminates the need for cluster administrators to pre-provision storage. + +* The _CIFS/SMB CSI driver_ enables you to create and mount CIFS/SMB PVs.
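As an illustration of the dynamic provisioning flow described above, the following is a minimal `StorageClass` sketch modeled on the upstream SMB CSI driver conventions. The server, share, and secret names are placeholders, and the exact parameter set for the productized driver may differ:

[source,yaml]
----
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: smb-csi # hypothetical name
provisioner: smb.csi.k8s.io
parameters:
  source: //smb-server.example.com/share # placeholder SMB server and share
  # Credentials for the share; secret name and namespace are placeholders:
  csi.storage.k8s.io/provisioner-secret-name: smb-creds
  csi.storage.k8s.io/provisioner-secret-namespace: openshift-cluster-csi-drivers
  csi.storage.k8s.io/node-stage-secret-name: smb-creds
  csi.storage.k8s.io/node-stage-secret-namespace: openshift-cluster-csi-drivers
reclaimPolicy: Delete
volumeBindingMode: Immediate
mountOptions:
  - dir_mode=0777
  - file_mode=0777
----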
+ +include::modules/persistent-storage-csi-about.adoc[leveloffset=+1] + +include::modules/persistent-storage-csi-smb-cifs-limits.adoc[leveloffset=+1] + +:FeatureName: CIFS/SMB +include::modules/persistent-storage-csi-olm-operator-install.adoc[leveloffset=+1] + +:FeatureName: CIFS/SMB +include::modules/persistent-storage-csi-smb-cifs-operator-install-driver.adoc[leveloffset=+1] + +include::modules/persistent-storage-csi-smb-cifs-provision-dynamic.adoc[leveloffset=+1] + +include::modules/persistent-storage-csi-smb-cifs-provision-static.adoc[leveloffset=+1] + +[role="_additional-resources"] +[id="additional-resources_{context}"] +== Additional resources +* xref:../../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-csi[Configuring CSI volumes] diff --git a/storage/container_storage_interface/persistent-storage-csi-snapshots.adoc b/storage/container_storage_interface/persistent-storage-csi-snapshots.adoc index 421063d62510..f0682c6248f6 100644 --- a/storage/container_storage_interface/persistent-storage-csi-snapshots.adoc +++ b/storage/container_storage_interface/persistent-storage-csi-snapshots.adoc @@ -21,3 +21,8 @@ include::modules/persistent-storage-csi-snapshots-create.adoc[leveloffset=+1] include::modules/persistent-storage-csi-snapshots-delete.adoc[leveloffset=+1] include::modules/persistent-storage-csi-snapshots-restore.adoc[leveloffset=+1] + +include::modules/persistent-storage-csi-vsphere-change-max-snapshot.adoc[leveloffset=+1] + +== Additional resources +* link:https://kb.vmware.com/s/article/1025279[Best practices for using VMware snapshots in the vSphere environment] diff --git a/storage/container_storage_interface/persistent-storage-csi-vsphere.adoc b/storage/container_storage_interface/persistent-storage-csi-vsphere.adoc index fd03a85ff151..a97405ebebcf 100644 --- a/storage/container_storage_interface/persistent-storage-csi-vsphere.adoc +++ b/storage/container_storage_interface/persistent-storage-csi-vsphere.adoc @@ -16,7 +16,7 @@ To create CSI-provisioned persistent volumes (PVs) that mount to vSphere storage * *vSphere CSI Driver Operator*: The Operator provides a storage class, called `thin-csi`, that you can use to create persistent volume claims (PVCs). The vSphere CSI Driver Operator supports dynamic volume provisioning by allowing storage volumes to be created on-demand, eliminating the need for cluster administrators to pre-provision storage. You can disable this default storage class if desired (see xref:../../storage/container_storage_interface/persistent-storage-csi-sc-manage.adoc#persistent-storage-csi-sc-manage[Managing the default storage class]). -* *vSphere CSI driver*: The driver enables you to create and mount vSphere PVs. In {product-title} 4.15, the driver version is 3.0.2. The vSphere CSI driver supports all of the file systems supported by the underlying Red Hat Core OS release, including XFS and Ext4. For more information about supported file systems, see link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_file_systems/assembly_overview-of-available-file-systems_managing-file-systems[Overview of available file systems]. +* *vSphere CSI driver*: The driver enables you to create and mount vSphere PVs. In {product-title} 4.16, the driver version is 3.1.2. The vSphere CSI driver supports all of the file systems supported by the underlying Red Hat Core operating system release, including XFS and Ext4.
For more information about supported file systems, see link:https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/managing_file_systems/overview-of-available-file-systems_managing-file-systems[Overview of available file systems]. //Please update driver version as needed with each major OCP release starting with 4.13. @@ -96,5 +96,9 @@ include::modules/persistent-storage-csi-vsphere-top-aware-infra-top.adoc[levelof include::modules/persistent-storage-csi-vsphere-top-aware-results.adoc[leveloffset=+2] +include::modules/persistent-storage-csi-vsphere-change-max-snapshot.adoc[leveloffset=+1] + == Additional resources * xref:../../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-csi[Configuring CSI volumes] + +* link:https://kb.vmware.com/s/article/1025279[Best practices for using VMware snapshots in the vSphere environment] diff --git a/storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc b/storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc index fd4a0b14892f..f9c08fd1beb9 100644 --- a/storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc +++ b/storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc @@ -69,9 +69,13 @@ include::modules/lvms-about-adding-devices-to-a-vg.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-unsupported-devices_logical-volume-manager-storage[Devices not supported by {lvms}] +* xref:../../../installing/install_config/installing-customizing.adoc#installation-special-config-raid_installing-customizing[Configuring a RAID-enabled data volume] + +* xref:../../../installing/install_config/installing-customizing.adoc#installation-special-config-encrypt-disk_installing-customizing[About disk encryption] -* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-integrating-software-raid-arrays_logical-volume-manager-storage[Integrating software RAID arrays with {lvms}] +* xref:../../../installing/install_config/installing-customizing.adoc#installation-special-config-storage-procedure_installing-customizing[Configuring disk encryption and mirroring] + +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-unsupported-devices_logical-volume-manager-storage[Devices not supported by {lvms}] // Devices not supported by LVMS include::modules/lvms-unsupported-devices.adoc[leveloffset=+2] @@ -83,17 +87,6 @@ include::modules/lvms-about-creating-lvmcluster-cr.adoc[leveloffset=+1] // Reusing a volume group from the previous LVM Storage installation include::modules/lvms-reusing-vg-from-prev-installation.adoc[leveloffset=+2] -//Integrating software RAID arrays -include::modules/lvms-integrating-software-raid-arrays.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources - -* xref:../../../installing/install_config/installing-customizing.adoc#installation-special-config-raid_installing-customizing[Configuring a RAID-enabled data volume] -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/managing_storage_devices/managing-raid_managing-storage-devices#creating-a-software-raid-on-an-installed-system_managing-raid[Creating a software RAID on an installed system] -* 
link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/managing_storage_devices/managing-raid_managing-storage-devices#replacing-a-failed-disk-in-raid_managing-raid[Replacing a failed disk in RAID] -* link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/9/html/managing_storage_devices/managing-raid_managing-storage-devices#repairing-raid-disks_managing-raid[Repairing RAID disks] - include::modules/lvms-creating-lvms-cluster-using-cli.adoc[leveloffset=+2] [role="_additional-resources"] @@ -147,18 +140,18 @@ include::modules/lvms-scaling-storage-of-clusters-using-cli.adoc[leveloffset=+2] * xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-unsupported-devices_logical-volume-manager-storage[Devices not supported by {lvms}] -* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-integrating-software-raid-arrays_logical-volume-manager-storage[Integrating software RAID arrays with {lvms}] +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#about-adding-devices-to-a-vg_logical-volume-manager-storage[About adding devices to a volume group] include::modules/lvms-scaling-storage-of-clusters-using-web-console.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-unsupported-devices_logical-volume-manager-storage[Devices not supported by {lvms}] +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#about-lvmcluster_logical-volume-manager-storage[About the LVMCluster custom resource] -* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-integrating-software-raid-arrays_logical-volume-manager-storage[Integrating software RAID arrays with {lvms}] +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-unsupported-devices_logical-volume-manager-storage[Devices not supported by {lvms}] -* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#about-lvmcluster_logical-volume-manager-storage[About the LVMCluster custom resource] +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#about-adding-devices-to-a-vg_logical-volume-manager-storage[About adding devices to a volume group] include::modules/lvms-scaling-storage-of-clusters-using-rhacm.adoc[leveloffset=+2] @@ -171,7 +164,7 @@ include::modules/lvms-scaling-storage-of-clusters-using-rhacm.adoc[leveloffset=+ * xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-unsupported-devices_logical-volume-manager-storage[Devices not supported by {lvms}] -* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-integrating-software-raid-arrays_logical-volume-manager-storage[Integrating software RAID arrays with {lvms}] +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#about-adding-devices-to-a-vg_logical-volume-manager-storage[About adding devices to a volume group] // Expanding PVCs include::modules/lvms-scaling-storage-expand-pvc.adoc[leveloffset=+1] @@ -212,6 +205,7 @@ 
include::modules/lvms-monitoring-logical-volume-manager-operator.adoc[leveloffse // Uninstalling LVM Storage +include::modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-cli.adoc[leveloffset=+1] include::modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc[leveloffset=+1] include::modules/lvms-uninstalling-logical-volume-manager-operator-using-rhacm.adoc[leveloffset=+1] @@ -222,3 +216,36 @@ include::modules/lvms-download-log-files-and-diagnostics.adoc[leveloffset=+1] .Additional resources * xref:../../../support/gathering-cluster-data.adoc#about-must-gather_gathering-cluster-data[About the must-gather tool] + +//Troubleshooting local persistent storage using LVM Storage + +include::modules/lvms-troubleshooting-persistent-storage.adoc[leveloffset=+1] + +include::modules/lvms-troubleshooting-investigating-a-pvc-stuck-in-the-pending-state.adoc[leveloffset=+2] + +include::modules/lvms-troubleshooting-recovering-from-missing-lvms-or-operator-components.adoc[leveloffset=+2] + +[role="_additional-resources"] +.Additional resources + +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#about-lvmcluster_logical-volume-manager-storage[About the LVMCluster custom resource] + +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#about-creating-lvmcluster-cr_logical-volume-manager-storage[Ways to create an LVMCluster custom resource] + +include::modules/lvms-troubleshooting-recovering-from-node-failure.adoc[leveloffset=+2] + +[role="_additional-resources"] +[id="additional-resources-forced-cleanup-1"] +.Additional resources + +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#performing-a-forced-cleanup_logical-volume-manager-storage[Performing a forced clean-up] + +include::modules/lvms-troubleshooting-recovering-from-disk-failure.adoc[leveloffset=+2] + +[role="_additional-resources"] +[id="additional-resources-forced-cleanup-2"] +.Additional resources + +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#performing-a-forced-cleanup_logical-volume-manager-storage[Performing a forced clean-up] + +include::modules/lvms-troubleshooting-performing-a-forced-cleanup.adoc[leveloffset=+2] \ No newline at end of file diff --git a/storage/persistent_storage/persistent_storage_local/troubleshooting-local-persistent-storage-using-lvms.adoc b/storage/persistent_storage/persistent_storage_local/troubleshooting-local-persistent-storage-using-lvms.adoc deleted file mode 100644 index 3ae4d7e76db5..000000000000 --- a/storage/persistent_storage/persistent_storage_local/troubleshooting-local-persistent-storage-using-lvms.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="troubleshooting-local-persistent-storage"] -= Troubleshooting local persistent storage using LVMS -include::_attributes/common-attributes.adoc[] -:context: troubleshooting-local-persistent-storage-using-lvms - -toc::[] - -Because {product-title} does not scope a persistent volume (PV) to a single project, it can be shared across the cluster and claimed by any project using a persistent volume claim (PVC). This can lead to a number of issues that require troubleshooting. 
- -include::modules/lvms-troubleshooting-investigating-a-pvc-stuck-in-the-pending-state.adoc[leveloffset=+1] - -include::modules/lvms-troubleshooting-recovering-from-missing-lvms-or-operator-components.adoc[leveloffset=+1] - -include::modules/lvms-troubleshooting-recovering-from-node-failure.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-forced-cleanup-1"] -.Additional resources - -* xref:../../../troubleshooting-local-persistent-storage-using-lvms.adoc#performing-a-forced-cleanup_troubleshooting-local-persistent-storage-using-lvms[Performing a forced cleanup] - -include::modules/lvms-troubleshooting-recovering-from-disk-failure.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources-forced-cleanup-2"] -.Additional resources - -* xref:../../../troubleshooting-local-persistent-storage-using-lvms.adoc#performing-a-forced-cleanup_troubleshooting-local-persistent-storage-using-lvms[Performing a forced cleanup] - -include::modules/lvms-troubleshooting-performing-a-forced-cleanup.adoc[leveloffset=+1] diff --git a/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc b/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc index 1f25cab6ce69..a54a204a5af2 100644 --- a/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc +++ b/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc @@ -16,7 +16,7 @@ This procedure is specific to the Amazon Web Services Elastic File System (AWS E {product-title} is capable of provisioning persistent volumes (PVs) using the Container Storage Interface (CSI) driver for AWS Elastic File Service (EFS). -Familiarity with link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.15/html-single/storage/index#persistent-storage-overview_understanding-persistent-storage[persistent storage] and link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.15/html-single/storage/index#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver. +Familiarity with link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-overview_understanding-persistent-storage[persistent storage] and link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver. After installing the AWS EFS CSI Driver Operator, {product-title} installs the AWS EFS CSI Operator and the AWS EFS CSI driver by default in the `openshift-cluster-csi-drivers` namespace. This allows the AWS EFS CSI Driver Operator to create CSI-provisioned PVs that mount to AWS EFS assets. 
@@ -51,7 +51,7 @@ include::modules/persistent-storage-csi-efs-sts.adoc[leveloffset=+1] * xref:../../storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc#persistent-storage-csi-olm-operator-install_rosa-persistent-storage-aws-efs-csi[Installing the AWS EFS CSI Driver Operator] -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.15/html-single/authentication_and_authorization/index#cco-ccoctl-configuring_cco-mode-sts[Configuring the Cloud Credential Operator utility] +* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/authentication_and_authorization/index#cco-ccoctl-configuring_cco-mode-sts[Configuring the Cloud Credential Operator utility] :StorageClass: AWS EFS :Provisioner: efs.csi.aws.com @@ -80,5 +80,5 @@ include::modules/persistent-storage-csi-olm-operator-uninstall.adoc[leveloffset= [role="_additional-resources"] == Additional resources -* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.15/html-single/storage/index#persistent-storage-csi[Configuring CSI volumes] +* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.16/html-single/storage/index#persistent-storage-csi[Configuring CSI volumes] diff --git a/support/troubleshooting/rosa-managed-resources.adoc b/support/troubleshooting/rosa-managed-resources.adoc index 75a293bc3a3d..5216afbbd599 100644 --- a/support/troubleshooting/rosa-managed-resources.adoc +++ b/support/troubleshooting/rosa-managed-resources.adoc @@ -28,7 +28,7 @@ include::https://raw.githubusercontent.com/openshift/managed-cluster-config/mast [id="rosa-add-on-managed-namespaces"] == {product-title} add-on namespaces -{product-title} add-ons are services available for installation after cluster installation. These additional services include {openshift-dev-spaces-productname}, Red Hat OpenShift API Management, and Cluster Logging Operator. Any changes to resources within the following namespaces can be overridden by the add-on during upgrades, which can lead to unsupported configurations for the add-on functionality. +{product-title} add-ons are services available for installation after cluster installation. These additional services include {openshift-dev-spaces-productname}, Red{nbsp}Hat OpenShift API Management, and Cluster Logging Operator. Any changes to resources within the following namespaces can be overridden by the add-on during upgrades, which can lead to unsupported configurations for the add-on functionality. 
.List of add-on managed namespaces [%collapsible] diff --git a/support/troubleshooting/troubleshooting-network-issues.adoc b/support/troubleshooting/troubleshooting-network-issues.adoc index 2df95543a80f..7dcb5ca520e2 100644 --- a/support/troubleshooting/troubleshooting-network-issues.adoc +++ b/support/troubleshooting/troubleshooting-network-issues.adoc @@ -21,8 +21,8 @@ include::modules/configuring-ovs-log-level-permanently.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#understanding-the-machine-config-operator[Understanding the Machine Config Operator] +* xref:../../machine_configuration/index.adoc#machine-config-operator_machine-config-overview[Understanding the Machine Config Operator] -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#checking-mco-status_post-install-machine-configuration-tasks[Checking machine config pool status] +* xref:../../machine_configuration/index.adoc#checking-mco-status_machine-config-overview[Checking machine config pool status] include::modules/displaying-ovs-logs.adoc[leveloffset=+2] diff --git a/telco_ref_design_specs/core/telco-core-ref-design-components.adoc b/telco_ref_design_specs/core/telco-core-ref-design-components.adoc index 63354160848d..7e0a011b1107 100644 --- a/telco_ref_design_specs/core/telco-core-ref-design-components.adoc +++ b/telco_ref_design_specs/core/telco-core-ref-design-components.adoc @@ -14,102 +14,102 @@ include::modules/telco-core-cpu-partitioning-performance-tune.adoc[leveloffset=+ [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/scalability_and_performance/low_latency_tuning/cnf-tuning-low-latency-nodes-with-perf-profile.html#cnf-cpu-infra-container_cnf-master[Tuning nodes for low latency with the performance profile] +* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/low_latency_tuning/cnf-tuning-low-latency-nodes-with-perf-profile.html#cnf-cpu-infra-container_cnf-master[Tuning nodes for low latency with the performance profile] -* link:https://docs.openshift.com/container-platform/4.15/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-du-configuring-host-firmware-requirements_sno-configure-for-vdu[Configuring host firmware for low latency and high performance] +* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/ztp_far_edge/ztp-reference-cluster-configuration-for-vdu.html#ztp-du-configuring-host-firmware-requirements_sno-configure-for-vdu[Configuring host firmware for low latency and high performance] include::modules/telco-core-service-mesh.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/service_mesh/v2x/ossm-about.html[About OpenShift Service Mesh] +* link:https://docs.openshift.com/container-platform/4.16/service_mesh/v2x/ossm-about.html[About OpenShift Service Mesh] include::modules/telco-core-rds-networking.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/networking/understanding-networking.html[Understanding networking] +* link:https://docs.openshift.com/container-platform/4.16/networking/understanding-networking.html[Understanding networking] include::modules/telco-core-cluster-network-operator.adoc[leveloffset=+2] [role="_additional-resources"] 
.Additional resources -* link:https://docs.openshift.com/container-platform/4.15/networking/cluster-network-operator.html#nw-cluster-network-operator_cluster-network-operator[Cluster Network Operator] +* link:https://docs.openshift.com/container-platform/4.16/networking/cluster-network-operator.html#nw-cluster-network-operator_cluster-network-operator[Cluster Network Operator] include::modules/telco-core-load-balancer.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/networking/metallb/about-metallb.html[About MetalLB and the MetalLB Operator] +* link:https://docs.openshift.com/container-platform/4.16/networking/metallb/about-metallb.html[About MetalLB and the MetalLB Operator] include::modules/telco-core-sriov.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/networking/hardware_networks/about-sriov.html[About SR-IOV hardware networks] +* link:https://docs.openshift.com/container-platform/4.16/networking/hardware_networks/about-sriov.html[About SR-IOV hardware networks] include::modules/telco-nmstate-operator.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/networking/k8s_nmstate/k8s-nmstate-about-the-k8s-nmstate-operator.html[About the Kubernetes NMState Operator] +* link:https://docs.openshift.com/container-platform/4.16/networking/k8s_nmstate/k8s-nmstate-about-the-k8s-nmstate-operator.html[About the Kubernetes NMState Operator] include::modules/telco-core-logging.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/observability/logging/cluster-logging.html[About logging] +* link:https://docs.openshift.com/container-platform/4.16/observability/logging/cluster-logging.html[About logging] include::modules/telco-core-power-management.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/scalability_and_performance/low_latency_tuning/cnf-tuning-low-latency-nodes-with-perf-profile.html#cnf-configuring-power-saving-for-nodes_cnf-low-latency-perf-profile[Configuring power saving for nodes that run colocated high and low priority workloads] +* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/low_latency_tuning/cnf-tuning-low-latency-nodes-with-perf-profile.html#cnf-configuring-power-saving-for-nodes_cnf-low-latency-perf-profile[Configuring power saving for nodes that run colocated high and low priority workloads] include::modules/telco-core-storage.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.15[Product Documentation for Red Hat OpenShift Data Foundation] +* link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.16[Product Documentation for Red Hat OpenShift Data Foundation] include::modules/telco-core-monitoring.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/observability/monitoring/monitoring-overview.html#about-openshift-monitoring[About {product-version} monitoring] +* link:https://docs.openshift.com/container-platform/4.16/observability/monitoring/monitoring-overview.html#about-openshift-monitoring[About 
{product-version} monitoring] include::modules/telco-core-scheduling.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/nodes/scheduling/nodes-scheduler-about.html[Controlling pod placement using the scheduler] +* link:https://docs.openshift.com/container-platform/4.16/nodes/scheduling/nodes-scheduler-about.html[Controlling pod placement using the scheduler] -* link:https://docs.openshift.com/container-platform/4.15/scalability_and_performance/cnf-numa-aware-scheduling.html[Scheduling NUMA-aware workloads] +* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/cnf-numa-aware-scheduling.html[Scheduling NUMA-aware workloads] include::modules/telco-core-installation.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/installing/installing_with_agent_based_installer/installing-with-agent-based-installer.html[Installing an {product-title} cluster with the Agent-based Installer] +* link:https://docs.openshift.com/container-platform/4.16/installing/installing_with_agent_based_installer/installing-with-agent-based-installer.html[Installing an {product-title} cluster with the Agent-based Installer] include::modules/telco-core-security.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/authentication/managing-security-context-constraints.html[Managing security context constraints] +* link:https://docs.openshift.com/container-platform/4.16/authentication/managing-security-context-constraints.html[Managing security context constraints] include::modules/telco-core-scalability.adoc[leveloffset=+1] @@ -121,7 +121,7 @@ include::modules/telco-core-rds-disconnected.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/updating/updating_a_cluster/updating_disconnected_cluster/index.html[About cluster updates in a disconnected environment] +* link:https://docs.openshift.com/container-platform/4.16/updating/updating_a_cluster/updating_disconnected_cluster/index.html[About cluster updates in a disconnected environment] include::modules/telco-core-kernel.adoc[leveloffset=+2] diff --git a/telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc b/telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc index e5e9c8197f7f..86d9afeed9a0 100644 --- a/telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc +++ b/telco_ref_design_specs/ran/telco-ran-ref-du-components.adoc @@ -45,9 +45,9 @@ include::modules/telco-ran-gitops-operator-and-ztp-plugins.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* link:https://docs.openshift.com/container-platform/4.15/scalability_and_performance/ztp_far_edge/ztp-preparing-the-hub-cluster.html#ztp-preparing-the-ztp-git-repository-ver-ind_ztp-preparing-the-hub-cluster[Preparing the {ztp} site configuration repository for version independence] +* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/ztp_far_edge/ztp-preparing-the-hub-cluster.html#ztp-preparing-the-ztp-git-repository-ver-ind_ztp-preparing-the-hub-cluster[Preparing the {ztp} site configuration repository for version independence] -* 
link:https://docs.openshift.com/container-platform/4.15/scalability_and_performance/ztp_far_edge/ztp-advanced-policy-config.html#ztp-adding-new-content-to-gitops-ztp_ztp-advanced-policy-config[Adding custom content to the {ztp} pipeline] +* link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/ztp_far_edge/ztp-advanced-policy-config.html#ztp-adding-new-content-to-gitops-ztp_ztp-advanced-policy-config[Adding custom content to the {ztp} pipeline] include::modules/telco-ran-agent-based-installer-abi.adoc[leveloffset=+2] diff --git a/telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc b/telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc index 1ca5d0e2cc22..5807fa4dcc11 100644 --- a/telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc +++ b/telco_ref_design_specs/ran/telco-ran-ref-du-crs.adoc @@ -14,7 +14,7 @@ CR fields you can change are annotated in the CR with YAML comments. [NOTE] ==== You can extract the complete set of RAN DU CRs from the `ztp-site-generate` container image. -See link:https://docs.openshift.com/container-platform/4.15/scalability_and_performance/ztp_far_edge/ztp-preparing-the-hub-cluster.html#ztp-preparing-the-ztp-git-repository_ztp-preparing-the-hub-cluster[Preparing the GitOps ZTP site configuration repository] for more information. +See link:https://docs.openshift.com/container-platform/4.16/scalability_and_performance/ztp_far_edge/ztp-preparing-the-hub-cluster.html#ztp-preparing-the-ztp-git-repository_ztp-preparing-the-hub-cluster[Preparing the GitOps ZTP site configuration repository] for more information. ==== include::modules/telco-ran-crs-day-2-operators.adoc[leveloffset=+1] diff --git a/updating/preparing_for_updates/kmm-preflight-validation.adoc b/updating/preparing_for_updates/kmm-preflight-validation.adoc index 5d5da309c730..50077bddc0bd 100644 --- a/updating/preparing_for_updates/kmm-preflight-validation.adoc +++ b/updating/preparing_for_updates/kmm-preflight-validation.adoc @@ -11,8 +11,9 @@ WARNING: This assembly has been moved into a subdirectory for 4.14+. Changes to To do: Remove this comment once 4.13 docs are EOL. //// +// Updated for TELCODOCS-1848 -Before performing an upgrade on the cluster with applied KMM modules, the administrator must verify that kernel modules installed using KMM are able to be installed on the nodes after the cluster upgrade and possible kernel upgrade. Preflight attempts to validate every `Module` loaded in the cluster, in parallel. Preflight does not wait for validation of one `Module` to complete before starting validation of another `Module`. +Before performing an upgrade on the cluster with applied KMM modules, you must verify that kernel modules installed by using KMM can be installed on the nodes after the cluster upgrade and possible kernel upgrade. Preflight attempts to validate every `Module` loaded in the cluster, in parallel. Preflight does not wait for validation of one `Module` to complete before starting validation of another `Module`.
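For illustration, the following is a minimal sketch of the kind of `PreflightValidationOCP` resource that starts this validation. The exact `apiVersion` suffix, the target release image, and the `pushBuiltImage` flag are assumptions based on the KMM Operator API; verify them against the CRD installed in your cluster.

[source,yaml]
----
apiVersion: kmm.sigs.x-k8s.io/v1beta2 # version suffix may differ per installed KMM release
kind: PreflightValidationOCP
metadata:
  name: preflight
spec:
  # Release image of the target version to validate every Module against (tag is illustrative)
  releaseImage: quay.io/openshift-release-dev/ocp-release:4.16.0-x86_64
  # Push any images built during validation to the registry defined in each Module
  pushBuiltImage: true
----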
:FeatureName: Kernel Module Management Operator Preflight validation diff --git a/updating/preparing_for_updates/updating-cluster-prepare.adoc b/updating/preparing_for_updates/updating-cluster-prepare.adoc index 47ac934e3a66..1b0c07ff8e24 100644 --- a/updating/preparing_for_updates/updating-cluster-prepare.adoc +++ b/updating/preparing_for_updates/updating-cluster-prepare.adoc @@ -1,6 +1,6 @@ :_mod-docs-content-type: ASSEMBLY [id="updating-cluster-prepare"] -= Preparing to update to {product-title} 4.15 += Preparing to update to {product-title} 4.16 include::_attributes/common-attributes.adoc[] :context: updating-cluster-prepare @@ -27,15 +27,11 @@ Without the correct micro-architecture requirements, the update process will fai [id="kube-api-removals_{context}"] == Kubernetes API removals -There are no Kubernetes API removals in {product-title} 4.15. +{product-title} 4.16 uses Kubernetes 1.29, which removed several deprecated APIs. -// Commenting out this section because there are no APIs being removed in OCP 4.15 / Kube 1.28. But we'll need this section again for 4.16 -//// -{product-title} 4.14 uses Kubernetes 1.27, which removed several deprecated APIs. - -A cluster administrator must provide a manual acknowledgment before the cluster can be updated from {product-title} 4.13 to 4.14. This is to help prevent issues after upgrading to {product-title} 4.14, where APIs that have been removed are still in use by workloads, tools, or other components running on or interacting with the cluster. Administrators must evaluate their cluster for any APIs in use that will be removed and migrate the affected components to use the appropriate new API version. After this evaluation and migration is complete, the administrator can provide the acknowledgment. +A cluster administrator must provide a manual acknowledgment before the cluster can be updated from {product-title} 4.15 to 4.16. This is to help prevent issues after upgrading to {product-title} 4.16, where APIs that have been removed are still in use by workloads, tools, or other components running on or interacting with the cluster. Administrators must evaluate their cluster for any APIs in use that will be removed and migrate the affected components to use the appropriate new API version. After this evaluation and migration is complete, the administrator can provide the acknowledgment. -Before you can update your {product-title} 4.13 cluster to 4.14, you must provide the administrator acknowledgment. +Before you can update your {product-title} 4.15 cluster to 4.16, you must provide the administrator acknowledgment. 
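As a concrete sketch of the acknowledgment described above, the administrator adds a release-specific key to the `admin-acks` config map in the `openshift-config` namespace. The exact key name here follows the pattern used by earlier releases and is an assumption; confirm it against the 4.16 update documentation before applying.

[source,yaml]
----
apiVersion: v1
kind: ConfigMap
metadata:
  name: admin-acks
  namespace: openshift-config
data:
  # Assumed key name, following the ack-<current>-kube-<version>-api-removals-in-<target> pattern
  ack-4.15-kube-1.29-api-removals-in-4.16: "true"
----

Applying this manifest with `oc apply -f` has the same effect as patching the config map directly.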
// Removed Kubernetes APIs include::modules/update-preparing-list.adoc[leveloffset=+2] @@ -59,7 +55,6 @@ include::modules/update-preparing-migrate.adoc[leveloffset=+2] // Providing the administrator acknowledgment include::modules/update-preparing-ack.adoc[leveloffset=+2] -//// // Assessing the risk of conditional updates include::modules/update-preparing-conditional.adoc[leveloffset=+1] diff --git a/updating/understanding_updates/how-updates-work.adoc b/updating/understanding_updates/how-updates-work.adoc index 47f5da1e0347..2d05421784d1 100644 --- a/updating/understanding_updates/how-updates-work.adoc +++ b/updating/understanding_updates/how-updates-work.adoc @@ -42,4 +42,4 @@ include::modules/update-mco-process.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-overview-post-install-machine-configuration-tasks[Machine config overview] \ No newline at end of file +* xref:../../machine_configuration/index.adoc#machine-config-overview[Machine Config Overview] \ No newline at end of file diff --git a/updating/understanding_updates/intro-to-updates.adoc b/updating/understanding_updates/intro-to-updates.adoc index 8c4b6c9ce4cb..be3744dab863 100644 --- a/updating/understanding_updates/intro-to-updates.adoc +++ b/updating/understanding_updates/intro-to-updates.adoc @@ -55,7 +55,7 @@ include::modules/update-common-terms.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-overview-post-install-machine-configuration-tasks[Machine config overview] +* xref:../../machine_configuration/index.adoc#machine-config-overview[Machine Config Overview] ifdef::openshift-enterprise[] * xref:../../updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc#update-service-overview_updating-restricted-network-cluster-osus[Using the OpenShift Update Service in a disconnected environment] * xref:../../updating/understanding_updates/understanding-update-channels-release.adoc#understanding-update-channels_understanding-update-channels-releases[Update channels] diff --git a/updating/understanding_updates/understanding-openshift-update-duration.adoc b/updating/understanding_updates/understanding-openshift-update-duration.adoc index 5a496028f444..2d3aa7fce587 100644 --- a/updating/understanding_updates/understanding-openshift-update-duration.adoc +++ b/updating/understanding_updates/understanding-openshift-update-duration.adoc @@ -33,7 +33,7 @@ include::modules/update-duration-mco.adoc[leveloffset=+2] [role="_additional-resources"] .Additional resources -* xref:../../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-overview-post-install-machine-configuration-tasks[Machine config overview] +* xref:../../machine_configuration/index.adoc#machine-config-overview[Machine Config Overview] * xref:../../nodes/pods/nodes-pods-configuring.adoc#nodes-pods-configuring-pod-distruption-about_nodes-pods-configuring[Pod disruption budget] //Example update duration of cluster Operators @@ -63,4 +63,4 @@ endif::openshift-origin[] == Additional resources * xref:../../architecture/architecture.adoc#architecture[OpenShift Container Platform architecture] -* xref:../../updating/understanding_updates/intro-to-updates.adoc#understanding-openshift-updates[OpenShift Container Platform updates] \ No newline at end of file +* 
xref:../../updating/understanding_updates/intro-to-updates.adoc#understanding-openshift-updates[OpenShift Container Platform updates] diff --git a/updating/updating_a_cluster/eus-eus-update.adoc b/updating/updating_a_cluster/eus-eus-update.adoc index 10acd3f2e5e5..0d6df464051c 100644 --- a/updating/updating_a_cluster/eus-eus-update.adoc +++ b/updating/updating_a_cluster/eus-eus-update.adoc @@ -26,7 +26,6 @@ There are a number of caveats to consider when attempting an EUS-to-EUS update. * EUS-to-EUS updates are only offered after updates between all versions involved have been made available in `stable` channels. * If you encounter issues during or after updating to the odd-numbered minor version but before updating to the next even-numbered version, then remediation of those issues may require that non-control plane hosts complete the update to the odd-numbered version before moving forward. * You can do a partial update by updating the worker or custom pool nodes to accommodate the time it takes for maintenance. -* You can complete the update process during multiple maintenance windows by pausing at intermediate steps. However, plan to complete the entire update within 60 days. This is critical to ensure that normal cluster automation processes are completed. * Until the machine config pools are unpaused and the update is complete, some features and bugs fixes in <4.y+1> and <4.y+2> of {product-title} are not available. diff --git a/updating/updating_a_cluster/migrating-to-multi-payload.adoc b/updating/updating_a_cluster/migrating-to-multi-payload.adoc index 869005a62142..0420a0052d7a 100644 --- a/updating/updating_a_cluster/migrating-to-multi-payload.adoc +++ b/updating/updating_a_cluster/migrating-to-multi-payload.adoc @@ -14,7 +14,9 @@ To do: Remove this comment once 4.13 docs are EOL. You can migrate your current cluster with single-architecture compute machines to a cluster with multi-architecture compute machines by updating to a multi-architecture, manifest-listed payload. This allows you to add mixed architecture compute nodes to your cluster. -For information about configuring your multi-architecture compute machines, see _Configuring multi-architecture compute machines on an {product-title} cluster_. +For information about configuring your multi-architecture compute machines, see "Configuring multi-architecture compute machines on an {product-title} cluster". + +Before migrating your single-architecture cluster to a cluster with multi-architecture compute machines, it is recommended that you install the Multiarch Tuning Operator and deploy a `ClusterPodPlacementConfig` custom resource. For more information, see "Managing workloads on multi-architecture clusters by using the Multiarch Tuning Operator". [IMPORTANT] ==== @@ -27,6 +29,7 @@ include::modules/migrating-to-multi-arch-cli.adoc[leveloffset=+1] [role="_additional-resources"] .Additional resources * xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/multi-architecture-configuration.adoc#multi-architecture-configuration[Configuring multi-architecture compute machines on an {product-title} cluster] +* xref:../../post_installation_configuration/configuring-multi-arch-compute-machines/multiarch-tuning-operator.adoc#multiarch-tuning-operator[Managing workloads on multi-architecture clusters by using the Multiarch Tuning Operator]
* xref:../../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[Updating a cluster using the web console] * xref:../../updating/updating_a_cluster/updating-cluster-cli.adoc#updating-cluster-cli[Updating a cluster using the CLI] * xref:../../updating/understanding_updates/intro-to-updates.adoc#understanding-clusterversion-conditiontypes_understanding-openshift-updates[Understanding cluster version condition types] diff --git a/updating/updating_a_cluster/updating-cluster-cli.adoc b/updating/updating_a_cluster/updating-cluster-cli.adoc index dc04fa1d4a52..13f0f98e413b 100644 --- a/updating/updating_a_cluster/updating-cluster-cli.adoc +++ b/updating/updating_a_cluster/updating-cluster-cli.adoc @@ -30,7 +30,7 @@ See xref:../../authentication/using-rbac.adoc#using-rbac[Using RBAC to define an * Ensure that all machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing a canary rollout update strategy. * If your cluster uses manually maintained credentials, update the cloud provider resources for the new release. For more information, including how to determine if this is a requirement for your cluster, see xref:../../updating/preparing_for_updates/preparing-manual-creds-update.adoc#preparing-manual-creds-update[Preparing to update a cluster with manually maintained credentials]. * Ensure that you address all `Upgradeable=False` conditions so the cluster allows an update to the next minor version. An alert displays at the top of the *Cluster Settings* page when you have one or more cluster Operators that cannot be updated. You can still update to the next available patch update for the minor release you are currently on. -// * Review the list of APIs that were removed in Kubernetes 1.28, migrate any affected components to use the new API version, and provide the administrator acknowledgment. For more information, see xref:../../updating/preparing_for_updates/updating-cluster-prepare.adoc#updating-cluster-prepare[Preparing to update to {product-title} 4.14]. +* Review the list of APIs that were removed in Kubernetes 1.29, migrate any affected components to use the new API version, and provide the administrator acknowledgment. For more information, see xref:../../updating/preparing_for_updates/updating-cluster-prepare.adoc#updating-cluster-prepare[Preparing to update to {product-title} 4.16]. * If you run an Operator or you have configured any application with the pod disruption budget, you might experience an interruption during the update process. If `minAvailable` is set to 1 in `PodDisruptionBudget`, the nodes are drained to apply pending machine configs which might block the eviction process. If several nodes are rebooted, all the pods might run on only one node, and the `PodDisruptionBudget` field can prevent the node drain.
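To make the `minAvailable` caveat above concrete, the following is a hypothetical `PodDisruptionBudget`. If the `app: my-app` selector matches only one running pod, eviction during a node drain can never satisfy `minAvailable: 1`, so the drain stalls; the name, namespace, and label are illustrative only.

[source,yaml]
----
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: my-app-pdb        # hypothetical name
  namespace: my-namespace # hypothetical namespace
spec:
  minAvailable: 1         # blocks eviction of the last matching pod during a drain
  selector:
    matchLabels:
      app: my-app         # hypothetical label
----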
[IMPORTANT] @@ -57,6 +57,9 @@ include::modules/updating-sno.adoc[leveloffset=+1] // Updating a cluster by using the CLI include::modules/update-upgrading-cli.adoc[leveloffset=+1] +//Introducing oc adm upgrade status - Tech Preview +include::modules/update-upgrading-oc-adm-upgrade-status.adoc[leveloffset=+1] + [role="_additional-resources"] .Additional resources diff --git a/updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc b/updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc index eb88863818e4..247a4becab34 100644 --- a/updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc +++ b/updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update-osus.adoc @@ -24,7 +24,7 @@ The following steps outline the high-level workflow on how to update a cluster i . Create a graph data container image for the OpenShift Update Service. -. Install the OSUS application and configure your clusters to use the local OpenShift Update Service. +. Install the OSUS application and configure your clusters to use the OpenShift Update Service in your environment. . Perform a supported update procedure from the documentation as you would with a connected cluster. @@ -40,7 +40,7 @@ include::modules/disconnected-osus-overview.adoc[leveloffset=+1] == Prerequisites * You must have the `oc` command-line interface (CLI) tool installed. -* You must provision a local container image registry with the container images for your update, as described in xref:../../../updating/updating_a_cluster/updating_disconnected_cluster/mirroring-image-repository.adoc#mirroring-ocp-image-repository[Mirroring {product-title} images]. +* You must provision a container image registry in your environment with the container images for your update, as described in xref:../../../updating/updating_a_cluster/updating_disconnected_cluster/mirroring-image-repository.adoc#mirroring-ocp-image-repository[Mirroring {product-title} images]. [id="registry-configuration-for-update-service"] == Configuring access to a secured registry for the OpenShift Update Service @@ -66,7 +66,7 @@ data: ... -----END CERTIFICATE----- ---- -<1> The OpenShift Update Service Operator requires the config map key name updateservice-registry in the registry CA cert. +<1> The OpenShift Update Service Operator requires the config map key name `updateservice-registry` in the registry CA cert. <2> If the registry has the port, such as `registry-with-port.example.com:5000`, `:` should be replaced with `..`. // Updating the global cluster pull secret @@ -114,7 +114,7 @@ and must be no more than 63 characters`, try creating the Update Service with a ==== // Configuring the Cluster Version Operator (CVO) -include::modules/update-service-configure-cvo.adoc[leveloffset=+3] +include::modules/update-service-configure-cvo.adoc[leveloffset=+1] [NOTE] ==== @@ -126,15 +126,15 @@ See xref:../../../networking/enable-cluster-wide-proxy.adoc#enable-cluster-wide- Before updating your cluster, confirm that the following conditions are met: -* The Cluster Version Operator (CVO) is configured to use your locally-installed OpenShift Update Service application. +* The Cluster Version Operator (CVO) is configured to use your installed OpenShift Update Service application. * The release image signature config map for the new release is applied to your cluster. 
+ [NOTE] ==== -The release image signature config map allows the Cluster Version Operator (CVO) to ensure the integrity of release images by verifying that the actual image signatures match the expected signatures. +The Cluster Version Operator (CVO) uses release image signatures to ensure that release images have not been modified by verifying that the release image signatures match the expected result. ==== -* The current release and update target release images are mirrored to a locally accessible registry. -* A recent graph data container image has been mirrored to your local registry. +* The current release and update target release images are mirrored to a registry in the disconnected environment. +* A recent graph data container image has been mirrored to your registry. * A recent version of the OpenShift Update Service Operator is installed. + [NOTE] ==== @@ -143,7 +143,7 @@ If you have not recently installed or updated the OpenShift Update Service Opera See xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for more information about how to update your OLM catalog in a disconnected environment. ==== -After you configure your cluster to use the locally-installed OpenShift Update Service and local mirror registry, you can use any of the following update methods: +After you configure your cluster to use the installed OpenShift Update Service and local mirror registry, you can use any of the following update methods: ** xref:../../../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[Updating a cluster using the web console] ** xref:../../../updating/updating_a_cluster/updating-cluster-cli.adoc#updating-cluster-cli[Updating a cluster using the CLI] diff --git a/updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update.adoc b/updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update.adoc index 6f72574b54ff..5f9c2baca6e0 100644 --- a/updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update.adoc +++ b/updating/updating_a_cluster/updating_disconnected_cluster/disconnected-update.adoc @@ -69,4 +69,4 @@ include::modules/generating-icsp-object-scoped-to-a-registry.adoc[leveloffset=+1 * xref:../../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] -* xref:../../../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-overview-post-install-machine-configuration-tasks[Machine Config Overview] +* xref:../../../machine_configuration/index.adoc#machine-config-overview[Machine Config Overview] diff --git a/upgrading/rosa-upgrading-sts.adoc b/upgrading/rosa-upgrading-sts.adoc index f31edd084355..1f93d98d0a7b 100644 --- a/upgrading/rosa-upgrading-sts.adoc +++ b/upgrading/rosa-upgrading-sts.adoc @@ -11,7 +11,7 @@ toc::[] To plan an upgrade, review the xref:../rosa_architecture/rosa_policy_service_definition/rosa-life-cycle.adoc#rosa-life-cycle[{product-title} update life cycle]. The life cycle page includes release definitions, support and upgrade requirements, installation policy information and life cycle dates. -Upgrades are manually initiated or automatically scheduled. Red Hat Site Reliability Engineers (SREs) monitor upgrade progress and remedy any issues encountered. +Upgrades are manually initiated or automatically scheduled.
Red{nbsp}Hat Site Reliability Engineers (SREs) monitor upgrade progress and remedy any issues encountered. [id="rosa-sts-upgrading-a-cluster-with-sts"] == Upgrading a ROSA Classic cluster diff --git a/virt/about_virt/about-virt.adoc b/virt/about_virt/about-virt.adoc index 10e1a8ac102d..86b78160eb11 100644 --- a/virt/about_virt/about-virt.adoc +++ b/virt/about_virt/about-virt.adoc @@ -52,3 +52,4 @@ endif::openshift-rosa,openshift-dedicated[] * xref:../../virt/live_migration/virt-about-live-migration.adoc#virt-about-live-migration[About live migration] * xref:../../virt/nodes/virt-node-maintenance.adoc#eviction-strategies[Eviction strategies] * link:https://access.redhat.com/articles/6994974[Tuning & Scaling Guide] +* link:https://access.redhat.com/articles/6571671[Supported limits for OpenShift Virtualization 4.x] diff --git a/virt/backup_restore/virt-backing-up-vms.adoc b/virt/backup_restore/virt-backing-up-vms.adoc deleted file mode 100644 index 682f8ae093e2..000000000000 --- a/virt/backup_restore/virt-backing-up-vms.adoc +++ /dev/null @@ -1,65 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="virt-backing-up-vms"] -= Backing up virtual machines -:context: virt-backing-up-vms - -toc::[] - -You back up virtual machines (VMs) by creating an OpenShift API for Data Protection (OADP) xref:../../virt/backup_restore/virt-backing-up-vms.adoc#oadp-creating-backup-cr_virt-backing-up-vms[`Backup` custom resource (CR)]. - -The `Backup` CR performs the following actions: - -// Hiding MOG from ROSA/OSD as not supported -ifndef::openshift-rosa,openshift-dedicated[] -* Backs up {VirtProductName} resources by creating an archive file on S3-compatible object storage, such as xref:../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-mcg.adoc#installing-oadp-mcg[Multicloud Object Gateway], Noobaa, or Minio. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] -* Backs up {VirtProductName} resources by creating an archive file on S3-compatible object storage, such as Noobaa or Minio. -endif::openshift-rosa,openshift-dedicated[] - -// Hiding Backup/Restore link until 68901 is merged -ifndef::openshift-rosa,openshift-dedicated[] -* Backs up VM disks by using one of the following options: - -** xref:../../virt/backup_restore/virt-backing-up-vms.adoc#oadp-backing-up-pvs-csi_virt-backing-up-vms[Container Storage Interface (CSI) snapshots] on CSI-enabled cloud storage, such as Ceph RBD or Ceph FS. -** xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#backing-up-applications[Backing up applications with File System Backup: Kopia or Restic] on object storage. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] -* Backs up VM disks by using one of the following options: - -** xref:../../virt/backup_restore/virt-backing-up-vms.adoc#oadp-backing-up-pvs-csi_virt-backing-up-vms[Container Storage Interface (CSI) snapshots] on CSI-enabled cloud storage, such as Ceph RBD or Ceph FS. -** Backing up applications with File System Backup: Kopia or Restic on object storage. -endif::openshift-rosa,openshift-dedicated[] - -[NOTE] -==== -OADP provides backup hooks to freeze the VM file system before the backup operation and unfreeze it when the backup is complete. 
- -The `kubevirt-controller` creates the `virt-launcher` pods with annotations that enable Velero to run the `virt-freezer` binary before and after the backup operation. - -The `freeze` and `unfreeze` APIs are subresources of the VM snapshot API. See xref:../../virt/backup_restore/virt-backup-restore-snapshots.adoc#virt-about-vm-snapshots_virt-backup-restore-snapshots[About virtual machine snapshots] for details. -==== - -ifndef::openshift-rosa,openshift-dedicated[] -You can add xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-creating-backup-hooks-doc.adoc#backing-up-applications[hooks] to the `Backup` CR to run commands on specific VMs before or after the backup operation. - -You schedule a backup by creating a xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-scheduling-backups-doc.adoc#backing-up-applications[`Schedule` CR] instead of a `Backup` CR. -endif::openshift-rosa,openshift-dedicated[] -ifdef::openshift-rosa,openshift-dedicated[] -You can add hooks to the `Backup` CR to run commands on specific VMs before or after the backup operation. - -You schedule a backup by creating a `Schedule` CR instead of a `Backup` CR. -endif::openshift-rosa,openshift-dedicated[] - -include::modules/oadp-creating-backup-cr.adoc[leveloffset=+1] -include::modules/oadp-backing-up-pvs-csi.adoc[leveloffset=+2] -include::modules/oadp-backing-up-applications-restic.adoc[leveloffset=+2] -include::modules/oadp-creating-backup-hooks.adoc[leveloffset=+2] - -ifndef::openshift-rosa,openshift-dedicated[] -[id="additional-resources_virt-backing-up-vms"] -== Additional resources - -* xref:../../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots-overview_persistent-storage-csi-snapshots[Overview of CSI volume snapshots] -endif::openshift-rosa,openshift-dedicated[] diff --git a/virt/backup_restore/virt-backup-restore-overview.adoc b/virt/backup_restore/virt-backup-restore-overview.adoc index 8548f32b55e6..6b6dce95c5e8 100644 --- a/virt/backup_restore/virt-backup-restore-overview.adoc +++ b/virt/backup_restore/virt-backup-restore-overview.adoc @@ -3,45 +3,54 @@ include::_attributes/common-attributes.adoc[] [id="virt-backup-restore-overview"] = Backing up and restoring virtual machines :context: virt-backup-restore-overview +:virt-backup-restore-overview: +:credentials: cloud-credentials +:provider: gcp toc::[] -//Hiding all links until PR 68901 merges -ifndef::openshift-rosa,openshift-dedicated[] -Back up and restore virtual machines by using the xref:../../backup_and_restore/index.adoc#application-backup-restore-operations-overview[OpenShift API for Data Protection (OADP)]. +You can install the {oadp-first} with {VirtProductName} by installing the {oadp-short} Operator and configuring a backup location. Then, you can install the Data Protection Application. -.Prerequisites +Back up and restore virtual machines by using the xref:../../backup_and_restore/index.adoc#application-backup-restore-operations-overview[{oadp-full}]. -* Access to the cluster as a user with the `cluster-admin` role. -// Non-admin user [https://issues.redhat.com/browse/OADP-203] is targeted for OADP 1.2. +[NOTE] +==== +{oadp-full} with {VirtProductName} supports the following backup and restore storage options: -.Procedure +* Container Storage Interface (CSI) backups -. 
Install the xref:../../backup_and_restore/application_backup_and_restore/installing/about-installing-oadp.adoc#about-installing-oadp[OADP Operator] according to the instructions for your storage provider. -. Install the xref:../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-ocs.adoc#oadp-installing-dpa_installing-oadp-ocs[Data Protection Application] with the `kubevirt` and `openshift` xref:../../backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc#oadp-plugins_oadp-features-plugins[plugins]. -. Back up virtual machines by creating a xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[`Backup` custom resource (CR)]. -. Restore the `Backup` CR by creating a xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#restoring-applications[`Restore` CR]. +* Container Storage Interface (CSI) backups with DataMover + +The following storage options are excluded: + +* File system backup and restore + +* Volume snapshot backup and restore + +For more information, see xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/oadp-backing-up-applications-restic-doc.adoc#oadp-backing-up-applications-restic-doc[Backing up applications with File System Backup: Kopia or Restic]. +==== +To install the {oadp-short} Operator in a restricted network environment, you must first disable the default OperatorHub sources and mirror the Operator catalog. See xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] for details. + +include::modules/install-and-configure-oadp-kubevirt.adoc[leveloffset=+1] [role="_additional-resources"] -[id="additional-resources_virt-backup-restore-overview"] -== Additional resources +.Additional resources -* xref:../../backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc#oadp-features-plugins[OADP features and plugins] -* xref:../../backup_and_restore/application_backup_and_restore/troubleshooting.adoc#troubleshooting[Troubleshooting] -endif::openshift-rosa,openshift-dedicated[] +* xref:../../backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc#oadp-plugins_oadp-features-plugins[{oadp-short} plugins] +* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc#backing-up-applications[`Backup` custom resource (CR)] +* xref:../../backup_and_restore/application_backup_and_restore/backing_up_and_restoring/restoring-applications.adoc#restoring-applications[`Restore` CR] +* xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks] -ifdef::openshift-rosa,openshift-dedicated[] -Back up and restore virtual machines by using the OpenShift API for Data Protection (OADP). +include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1] -.Prerequisites +[IMPORTANT] +==== +Red Hat supports using {VirtProductName} 4.14 or later with {oadp-short} 1.3.x or later. -* Access to the cluster as a user with the `cluster-admin` role. -// Non-admin user [https://issues.redhat.com/browse/OADP-203] is targeted for OADP 1.2. +{oadp-short} versions before 1.3.0 are not supported for backup and restore of {VirtProductName}. +==== -.Procedure +:!provider: +:!credentials: +:!virt-backup-restore-overview: -. 
Install the OADP Operator according to the instructions for your storage provider. -. Install the Data Protection Application with the `kubevirt` and `openshift` plugins. -. Back up virtual machines by creating a `Backup` custom resource (CR). -. Restore the `Backup` CR by creating a `Restore` CR. -endif::openshift-rosa,openshift-dedicated[] \ No newline at end of file diff --git a/virt/backup_restore/virt-installing-configuring-oadp.adoc b/virt/backup_restore/virt-installing-configuring-oadp.adoc deleted file mode 100644 index abcc0ad7c67f..000000000000 --- a/virt/backup_restore/virt-installing-configuring-oadp.adoc +++ /dev/null @@ -1,39 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="virt-installing-configuring-oadp"] -= Installing and configuring OADP -:context: virt-installing-configuring-oadp -:virt-installing-configuring-oadp: -:credentials: cloud-credentials -:provider: gcp - -toc::[] - -As a cluster administrator, you install the OpenShift API for Data Protection (OADP) by installing the OADP Operator. The Operator installs link:https://{velero-domain}/docs/v{velero-version}/[Velero {velero-version}]. - -You create a default `Secret` for your backup storage provider and then you install the Data Protection Application. - -include::modules/oadp-installing-operator.adoc[leveloffset=+1] - -include::modules/oadp-about-backup-snapshot-locations-secrets.adoc[leveloffset=+1] -include::modules/oadp-creating-default-secret.adoc[leveloffset=+2] -include::modules/oadp-secrets-for-different-credentials.adoc[leveloffset=+2] - -[id="configuring-dpa-ocs"] -== Configuring the Data Protection Application - -You can configure the Data Protection Application by setting Velero resource allocations or enabling self-signed CA certificates. - -include::modules/oadp-setting-resource-limits-and-requests.adoc[leveloffset=+2] -include::modules/oadp-self-signed-certificate.adoc[leveloffset=+2] - -include::modules/oadp-installing-dpa-1-2-and-earlier.adoc[leveloffset=+1] -include::modules/oadp-installing-dpa-1-3.adoc[leveloffset=+1] -include::modules/oadp-enabling-csi-dpa.adoc[leveloffset=+2] - -[id="uninstalling-oadp_{context}"] -== Uninstalling OADP - -You uninstall the OpenShift API for Data Protection (OADP) by deleting the OADP Operator. See xref:../../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-cluster[Deleting Operators from a cluster] for details. - -:virt-installing-configuring-oadp!: diff --git a/virt/backup_restore/virt-restoring-vms.adoc b/virt/backup_restore/virt-restoring-vms.adoc deleted file mode 100644 index fdac38112e7b..000000000000 --- a/virt/backup_restore/virt-restoring-vms.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -include::_attributes/common-attributes.adoc[] -[id="virt-restoring-vms"] -= Restoring virtual machines -:context: virt-restoring-vms - -toc::[] - -You restore an OpenShift API for Data Protection (OADP) `Backup` custom resource (CR) by creating a xref:../../virt/backup_restore/virt-restoring-vms.adoc#oadp-creating-restore-cr_virt-restoring-vms[`Restore` CR]. - -You can add xref:../../virt/backup_restore/virt-restoring-vms.adoc#oadp-creating-restore-hooks_virt-restoring-vms[hooks] to the `Restore` CR to run commands in init containers, before the application container starts, or in the application container itself. 
- -include::modules/oadp-creating-restore-cr.adoc[leveloffset=+1] -include::modules/oadp-creating-restore-hooks.adoc[leveloffset=+2] diff --git a/virt/getting_started/virt-getting-started.adoc b/virt/getting_started/virt-getting-started.adoc index 7207a7371c52..f9565a53a4f4 100644 --- a/virt/getting_started/virt-getting-started.adoc +++ b/virt/getting_started/virt-getting-started.adoc @@ -26,6 +26,7 @@ endif::openshift-rosa,openshift-dedicated[] * xref:../../virt/install/installing-virt.adoc#virt-installing-virt-operator_installing-virt[Install the {VirtProductName} Operator]. * xref:../../virt/getting_started/virt-using-the-cli-tools.adoc#installing-virtctl_virt-using-the-cli-tools[Install the `virtctl` command line interface (CLI) tool]. + [discrete] [id="additional-resources_planning-and-installing"] [role="_additional-resources"] @@ -101,5 +102,7 @@ Manage a VM: * xref:../../virt/storage/virt-storage-config-overview.adoc#virt-storage-config-overview[Configure storage options and automatic boot source updates]. * xref:../../virt/monitoring/virt-monitoring-overview.adoc#virt-monitoring-overview[Learn about monitoring and health checks]. * xref:../../virt/live_migration/virt-about-live-migration.adoc#virt-about-live-migration[Learn about live migration]. -* xref:../../virt/backup_restore/virt-backup-restore-overview.adoc#virt-backup-restore-overview[Back up and restore VMs]. +ifndef::openshift-rosa,openshift-dedicated[] +* xref:../../backup_and_restore/application_backup_and_restore/installing/installing-oadp-kubevirt.adoc#installing-oadp-kubevirt[Back up and restore VMs by using the {oadp-first}]. +endif::openshift-rosa,openshift-dedicated[] * link:https://access.redhat.com/articles/6994974[Tune and scale your cluster]. diff --git a/virt/monitoring/virt-exposing-downward-metrics.adoc b/virt/monitoring/virt-exposing-downward-metrics.adoc index 4eec7c783e73..f86fd50f5abd 100644 --- a/virt/monitoring/virt-exposing-downward-metrics.adoc +++ b/virt/monitoring/virt-exposing-downward-metrics.adoc @@ -10,6 +10,13 @@ As an administrator, you can expose a limited set of host and virtual machine (V Users can view the metrics results by using the command line or the `vm-dump-metrics tool`. +[NOTE] +==== +On Red Hat Enterprise Linux (RHEL) 9, use the command line to view downward metrics. See xref:../../virt/monitoring/virt-exposing-downward-metrics.adoc#virt-viewing-downward-metrics-cli_virt-exposing-downward-metrics[Viewing downward metrics by using the command line]. + +The vm-dump-metrics tool is not supported on the Red Hat Enterprise Linux (RHEL) 9 platform. +==== + [id="virt-enabling-disabling-feature-gate"] == Enabling or disabling the downwardMetrics feature gate diff --git a/virt/monitoring/virt-prometheus-queries.adoc b/virt/monitoring/virt-prometheus-queries.adoc index ad676a957dc5..0c57a5252708 100644 --- a/virt/monitoring/virt-prometheus-queries.adoc +++ b/virt/monitoring/virt-prometheus-queries.adoc @@ -20,7 +20,7 @@ endif::openshift-rosa,openshift-dedicated[] // Hiding in ROSA/OSD as user cannot edit MCO ifndef::openshift-rosa,openshift-dedicated[] -* To use the vCPU metric, the `schedstats=enable` kernel argument must be applied to the `MachineConfig` object. This kernel argument enables scheduler statistics used for debugging and performance tuning and adds a minor additional load to the scheduler. 
For more information, see xref:../../post_installation_configuration/machine-configuration-tasks.adoc#nodes-nodes-kernel-arguments_post-install-machine-configuration-tasks[Adding kernel arguments to nodes]. +* To use the vCPU metric, the `schedstats=enable` kernel argument must be applied to the `MachineConfig` object. This kernel argument enables scheduler statistics used for debugging and performance tuning and adds a minor additional load to the scheduler. For more information, see xref:../../machine_configuration/machine-configs-configure.adoc#nodes-nodes-kernel-arguments_machine-configs-configure[Adding kernel arguments to nodes]. endif::openshift-rosa,openshift-dedicated[] * For guest memory swapping queries to return data, memory swapping must be enabled on the virtual guests. diff --git a/virt/monitoring/virt-running-cluster-checkups.adoc b/virt/monitoring/virt-running-cluster-checkups.adoc index 49a33f6a7fab..05e2fbf35f1a 100644 --- a/virt/monitoring/virt-running-cluster-checkups.adoc +++ b/virt/monitoring/virt-running-cluster-checkups.adoc @@ -22,6 +22,21 @@ include::snippets/technology-preview.adoc[] include::modules/virt-about-cluster-checkup-framework.adoc[leveloffset=+1] +== Running cluster checkups in the web console + +Use the web console to run a latency or storage checkup on a cluster. + +Use the following procedures the first time you run a latency checkup and a storage checkup in the web console. For additional checkups, click *Run checkup* on either checkup tab, and select the appropriate checkup from the drop-down menu. + +[IMPORTANT] ==== +Before you run a latency checkup, you must first xref:../../virt/vm_networking/virt-connecting-vm-to-linux-bridge.adoc#virt-connecting-vm-to-linux-bridge[create a bridge interface] on the cluster nodes to connect the VM's secondary interface to any interface on the node. If you do not create a bridge interface, the VMs will not start and the job will fail. +==== + +include::modules/virt-latency-checkup-web-console.adoc[leveloffset=+2] + +include::modules/virt-storage-checkup-web-console.adoc[leveloffset=+2] + include::modules/virt-measuring-latency-vm-secondary-network.adoc[leveloffset=+1] include::modules/virt-checking-cluster-dpdk-readiness.adoc[leveloffset=+1] diff --git a/virt/support/virt-collecting-virt-data.adoc b/virt/support/virt-collecting-virt-data.adoc index 7fa74d0be2b4..4bad7aa64753 100644 --- a/virt/support/virt-collecting-virt-data.adoc +++ b/virt/support/virt-collecting-virt-data.adoc @@ -39,7 +39,7 @@ Collecting data about your environment minimizes the time required to analyze an // must-gather not supported for ROSA/OSD, per Dustin Row ifndef::openshift-rosa,openshift-dedicated[] . xref:../../support/gathering-cluster-data.adoc#support_gathering_data_gathering-cluster-data[Collect must-gather data for the cluster]. -. link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.15/html-single/troubleshooting_openshift_data_foundation/index#downloading-log-files-and-diagnostic-information_rhodf[Collect must-gather data for {rh-storage-first}], if necessary. +. link:https://access.redhat.com/documentation/en-us/red_hat_openshift_data_foundation/4.16/html-single/troubleshooting_openshift_data_foundation/index#downloading-log-files-and-diagnostic-information_rhodf[Collect must-gather data for {rh-storage-first}], if necessary. . xref:../../virt/support/virt-collecting-virt-data.adoc#virt-using-virt-must-gather_virt-collecting-virt-data[Collect must-gather data for {VirtProductName}]. . 
xref:../../observability/monitoring/managing-metrics.adoc#querying-metrics-for-all-projects-as-an-administrator_managing-metrics[Collect Prometheus metrics for the cluster]. endif::openshift-rosa,openshift-dedicated[] diff --git a/virt/virtual_machines/advanced_vm_management/virt-about-multi-queue.adoc b/virt/virtual_machines/advanced_vm_management/virt-about-multi-queue.adoc new file mode 100644 index 000000000000..e1ae254e12c2 --- /dev/null +++ b/virt/virtual_machines/advanced_vm_management/virt-about-multi-queue.adoc @@ -0,0 +1,25 @@ +:_mod-docs-content-type: ASSEMBLY +[id="virt-about-multi-queue"] += About multi-queue functionality +include::_attributes/common-attributes.adoc[] +:context: virt-about-multi-queue + +toc::[] + +Use multi-queue functionality to scale network throughput and performance on virtual machines (VMs) with multiple vCPUs. + +By default, the `queueCount` value, which is derived from the domain XML, is determined by the number of vCPUs allocated to a VM. Network performance does not scale as the number of vCPUs increases. Additionally, because virtio-net has only one Tx and Rx queue, guests cannot transmit or receive packets in parallel. + +[NOTE] ==== +Enabling virtio-net multiqueue does not offer significant improvements when the number of vNICs in a guest instance is proportional to the number of vCPUs. +==== + +[id="virt-about-multi-queue_{context}"] +== Known limitations + +* MSI vectors are still consumed if virtio-net multiqueue is enabled in the host but not enabled in the guest operating system by the administrator. +* Each virtio-net queue consumes 64 KiB of kernel memory for the vhost driver. +* Starting a VM with more than 16 CPUs results in no connectivity if `networkInterfaceMultiqueue` is set to `true` (link:https://issues.redhat.com/browse/CNV-16107[CNV-16107]).
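As a sketch of how the `networkInterfaceMultiqueue` field referenced in the limitation above is set, the following is a minimal KubeVirt `VirtualMachine` fragment. The VM name, CPU topology, and memory size are illustrative; the field itself lives under `spec.template.spec.domain.devices`.

[source,yaml]
----
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: vm-multiqueue # illustrative name
spec:
  runStrategy: Halted
  template:
    spec:
      domain:
        cpu:
          cores: 4 # multiple vCPUs so that each virtio-net queue can be serviced in parallel
        memory:
          guest: 2Gi # illustrative size
        devices:
          networkInterfaceMultiqueue: true # enables one Tx/Rx queue pair per vCPU
          interfaces:
          - name: default
            masquerade: {}
      networks:
      - name: default
        pod: {}
----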
+ +include::modules/virt-enabling-multi-queue.adoc[leveloffset=+1] diff --git a/virt/virtual_machines/advanced_vm_management/virt-configuring-pci-passthrough.adoc b/virt/virtual_machines/advanced_vm_management/virt-configuring-pci-passthrough.adoc index 59776061d752..164d38779c7a 100644 --- a/virt/virtual_machines/advanced_vm_management/virt-configuring-pci-passthrough.adoc +++ b/virt/virtual_machines/advanced_vm_management/virt-configuring-pci-passthrough.adoc @@ -49,4 +49,4 @@ include::modules/virt-assigning-pci-device-virtual-machine.adoc[leveloffset=+2] == Additional resources * link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/virtualization_deployment_and_administration_guide/sect-troubleshooting-enabling_intel_vt_x_and_amd_v_virtualization_hardware_extensions_in_bios[Enabling Intel VT-X and AMD-V Virtualization Hardware Extensions in BIOS] * link:https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/configuring_basic_system_settings/assembly_managing-file-permissions_configuring-basic-system-settings[Managing file permissions] -* xref:../../../post_installation_configuration/machine-configuration-tasks.adoc#post-install-machine-configuration-tasks[Postinstallation machine configuration tasks] +* xref:../../../machine_configuration/index.adoc#machine-config-overview[Machine Config Overview] diff --git a/virt/virtual_machines/advanced_vm_management/virt-uefi-mode-for-vms.adoc b/virt/virtual_machines/advanced_vm_management/virt-uefi-mode-for-vms.adoc index 21c11eac8ad2..3a67acb56680 100644 --- a/virt/virtual_machines/advanced_vm_management/virt-uefi-mode-for-vms.adoc +++ b/virt/virtual_machines/advanced_vm_management/virt-uefi-mode-for-vms.adoc @@ -10,3 +10,5 @@ You can boot a virtual machine (VM) in Unified Extensible Firmware Interface (UE include::modules/virt-about-uefi-mode-for-vms.adoc[leveloffset=+1] include::modules/virt-booting-vms-uefi-mode.adoc[leveloffset=+1] +include::modules/virt-enabling-persistent-efi.adoc[leveloffset=+1] +include::modules/virt-configuring-vm-with-persistent-efi.adoc[leveloffset=+1] \ No newline at end of file diff --git a/virt/virtual_machines/virt-accessing-vm-consoles.adoc b/virt/virtual_machines/virt-accessing-vm-consoles.adoc index 12a9bfeff917..d07dc52861a5 100644 --- a/virt/virtual_machines/virt-accessing-vm-consoles.adoc +++ b/virt/virtual_machines/virt-accessing-vm-consoles.adoc @@ -27,6 +27,10 @@ include::modules/virt-connecting-vm-virtctl.adoc[leveloffset=+2] include::modules/virt-temporary-token-VNC.adoc[leveloffset=+2] :!vnc-console: +:context: vnc-console +include::modules/virt-cluster-role-VNC.adoc[leveloffset=+3] +:!vnc-console: + [id="serial-console_virt-accessing-vm-consoles"] == Connecting to the serial console diff --git a/virt/virtual_machines/virtual_disks/virt-configuring-shared-volumes-for-vms.adoc b/virt/virtual_machines/virtual_disks/virt-configuring-shared-volumes-for-vms.adoc index 82a5d1859109..7fafc5f13f30 100644 --- a/virt/virtual_machines/virtual_disks/virt-configuring-shared-volumes-for-vms.adoc +++ b/virt/virtual_machines/virtual_disks/virt-configuring-shared-volumes-for-vms.adoc @@ -10,8 +10,10 @@ You can configure shared disks to allow multiple virtual machines (VMs) to share You configure disk sharing by exposing the storage as either of these types: -* An ordinary virtual machine disk -* A logical unit number (LUN) device with an iSCSi connection and raw device mapping, as required for Windows Failover Clustering for shared volumes +* An ordinary VM disk +* A 
logical unit number (LUN) disk with an iSCSI connection and raw device mapping, as required for Windows Failover Clustering for shared volumes + +In addition to configuring disk sharing, you can also set an error policy for each ordinary VM disk or LUN disk. The error policy controls how the hypervisor behaves when an input/output error occurs on a disk read or write. include::modules/virt-configuring-vm-disk-sharing.adoc[leveloffset=+1] diff --git a/virt/vm_networking/virt-accessing-vm-internal-fqdn.adoc b/virt/vm_networking/virt-accessing-vm-internal-fqdn.adoc new file mode 100644 index 000000000000..675c5b1e18ad --- /dev/null +++ b/virt/vm_networking/virt-accessing-vm-internal-fqdn.adoc @@ -0,0 +1,29 @@ +:_mod-docs-content-type: ASSEMBLY +[id="virt-accessing-vm-internal-fqdn"] += Accessing a virtual machine by using its internal FQDN +include::_attributes/common-attributes.adoc[] +:context: virt-accessing-vm-internal-fqdn + +toc::[] + +You can access a virtual machine (VM) that is connected to the default internal pod network on a stable fully qualified domain name (FQDN) by using headless services. + +A Kubernetes _headless service_ is a form of service that does not allocate a cluster IP address to represent a set of pods. Instead of providing a single virtual IP address for the service, a headless service creates a DNS record for each pod associated with the service. You can expose a VM through its FQDN without having to expose a specific TCP or UDP port. + +[IMPORTANT] ==== +If you created a VM by using the {product-title} web console, you can find its internal FQDN listed in the *Network* tile on the *Overview* tab of the *VirtualMachine details* page. For more information about connecting to the VM, see xref:../../virt/vm_networking/virt-accessing-vm-internal-fqdn.adoc#virt-connecting-vm-internal-fqdn_virt-accessing-vm-internal-fqdn[Connecting to a virtual machine by using its internal FQDN]. ==== + + +include::modules/virt-creating-headless-services.adoc[leveloffset=+1] + +include::modules/virt-discovering-vm-internal-fqdn.adoc[leveloffset=+1] + +include::modules/virt-connecting-vm-internal-fqdn.adoc[leveloffset=+1] + +[role="_additional-resources"] +[id="additional-resources_virt-accessing-vm-internal-fqdn"] +== Additional resources + +* xref:../../virt/vm_networking/virt-exposing-vm-with-service.adoc#virt-exposing-vm-with-service[Exposing a VM by using a service] \ No newline at end of file diff --git a/virt/vm_networking/virt-accessing-vm-secondary-network-fqdn.adoc b/virt/vm_networking/virt-accessing-vm-secondary-network-fqdn.adoc index 7608634b9607..5aefb4059c0e 100644 --- a/virt/vm_networking/virt-accessing-vm-secondary-network-fqdn.adoc +++ b/virt/vm_networking/virt-accessing-vm-secondary-network-fqdn.adoc @@ -1,14 +1,14 @@ :_mod-docs-content-type: ASSEMBLY [id="virt-accessing-vm-secondary-network-fqdn"] -= Accessing a virtual machine by using the cluster FQDN += Accessing a virtual machine by using its external FQDN include::_attributes/common-attributes.adoc[] :context: virt-accessing-vm-secondary-network-fqdn toc::[] -You can access a virtual machine (VM) that is attached to a secondary network interface from outside the cluster by using the fully qualified domain name (FQDN) of the cluster. +You can access a virtual machine (VM) that is attached to a secondary network interface from outside the cluster by using its fully qualified domain name (FQDN).
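To illustrate the headless service mechanism that the new internal-FQDN assembly above describes, the following is a sketch. The service name, namespace, and selector label are assumptions; the service name must match the `spec.template.spec.subdomain` value set on the VM, and the resulting FQDN then follows the usual `<hostname>.<subdomain>.<namespace>.svc.cluster.local` pattern.

[source,yaml]
----
apiVersion: v1
kind: Service
metadata:
  name: mysubdomain     # assumed name; must equal the VM's spec.template.spec.subdomain
  namespace: vm-project # assumed namespace
spec:
  clusterIP: None       # headless: per-pod DNS records instead of one virtual IP
  selector:
    expose: vm-internal-fqdn # assumed label set on the VM's pod template
----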
-:FeatureName: Accessing VMs by using the cluster FQDN
+:FeatureName: Accessing a VM from outside the cluster by using its FQDN
 include::snippets/technology-preview.adoc[]
 
 include::modules/virt-configuring-secondary-dns-server.adoc[leveloffset=+1]
diff --git a/welcome/about-hcp.adoc b/welcome/about-hcp.adoc
index 4c3aeff28fdb..aa4568d23873 100644
--- a/welcome/about-hcp.adoc
+++ b/welcome/about-hcp.adoc
@@ -13,7 +13,7 @@ toc::[]
 * {hcp-title} requires a minimum of only two nodes, making it ideal for smaller projects while still being able to scale to support larger projects and enterprises.
 
-* The underlying control plane infrastructure is fully managed. Control plane components, such as the API server and etcd database, are hosted in a Red Hat-owned AWS account.
+* The underlying control plane infrastructure is fully managed. Control plane components, such as the API server and etcd database, are hosted in a Red{nbsp}Hat-owned AWS account.
 
 * Provisioning time is approximately 10 minutes.
@@ -81,11 +81,11 @@ Use the following sections to find content to help you learn about and use {hcp-
 |===
 |Learn about application development in {hcp-title} |Deploy applications |Additional resources
 
-| link:https://developers.redhat.com/[Red Hat Developers site]
+| link:https://developers.redhat.com/[Red{nbsp}Hat Developers site]
 | xref:../applications/index.adoc#building-applications-overview[Building applications overview]
 | xref:../support/index.adoc#support-overview[Getting support]
 
-| link:https://developers.redhat.com/products/openshift-dev-spaces/overview[{openshift-dev-spaces-productname} (formerly Red Hat CodeReady Workspaces)]
+| link:https://developers.redhat.com/products/openshift-dev-spaces/overview[{openshift-dev-spaces-productname} (formerly Red{nbsp}Hat CodeReady Workspaces)]
 | xref:../operators/index.adoc#operators-overview[Operators overview]
 |
diff --git a/welcome/cloud-experts-rosa-hcp-sts-explained.adoc b/welcome/cloud-experts-rosa-hcp-sts-explained.adoc
index 9a62f2edacac..4f8bb5f4e6d7 100644
--- a/welcome/cloud-experts-rosa-hcp-sts-explained.adoc
+++ b/welcome/cloud-experts-rosa-hcp-sts-explained.adoc
@@ -15,12 +15,12 @@ toc::[]
 [id="credential-methods-rosa-hcp"]
 == AWS STS credential method
 
-As part of {hcp-title}, Red Hat must be granted the necessary permissions to manage infrastructure resources in your AWS account.
-{hcp-title} grants the cluster's automation software limited, short-term access to resources in your AWS account.
+As part of {hcp-title}, Red{nbsp}Hat must be granted the necessary permissions to manage infrastructure resources in your AWS account.
+{hcp-title} grants the cluster's automation software limited, short-term access to resources in your AWS account. The STS method uses predefined roles and policies to grant temporary, least-privilege permissions to IAM roles. The credentials typically expire an hour after being requested. Once expired, the credentials are no longer recognized by AWS, and API requests made with them no longer have access to the account. For more information, see the link:https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html[AWS documentation].
 
-AWS IAM STS roles must be created for each {hcp-title} cluster. The ROSA command line interface (CLI) (`rosa`) manages the STS roles and helps you attach the ROSA-specific, AWS-managed policies to each role. The CLI provides the commands and files to create the roles, attach the AWS-managed policies, and an option to allow the CLI to automatically create the roles and attach the policies.
+AWS IAM STS roles must be created for each {hcp-title} cluster. The ROSA command line interface (CLI) (`rosa`) manages the STS roles and helps you attach the ROSA-specific, AWS-managed policies to each role. The CLI provides the commands and files to create the roles, attach the AWS-managed policies, and an option to allow the CLI to automatically create the roles and attach the policies. //See [insert new xref when we have one for HCP] for more information about the different `--mode` options. [id="hcp-sts-security"] @@ -90,7 +90,7 @@ Deploying a {hcp-title} cluster follows the following steps: . You create the account-wide roles. . You create the Operator roles. -. Red Hat uses AWS STS to send the required permissions to AWS that allow AWS to create and attach the corresponding AWS-manged Operator policies. +. Red{nbsp}Hat uses AWS STS to send the required permissions to AWS that allow AWS to create and attach the corresponding AWS-managed Operator policies. . You create the OIDC provider. . You create the cluster. @@ -101,7 +101,7 @@ The ROSA CLI can automatically create the roles for you, or you can manually cre [id="hcp-sts-process"] == {hcp-title} workflow -The user creates the required account-wide roles. During role creation, a trust policy, known as a cross-account trust policy, is created which allows a Red Hat-owned role to assume the roles. Trust policies are also created for the EC2 service, which allows workloads on EC2 instances to assume roles and obtain credentials. AWS assigns a corresponding permissions policy to each role. +The user creates the required account-wide roles. During role creation, a trust policy, known as a cross-account trust policy, is created which allows a Red{nbsp}Hat-owned role to assume the roles. Trust policies are also created for the EC2 service, which allows workloads on EC2 instances to assume roles and obtain credentials. AWS assigns a corresponding permissions policy to each role. After the account-wide roles and policies are created, the user can create a cluster. Once cluster creation is initiated, the user creates the Operator roles so that cluster Operators can make AWS API calls. These roles are then assigned to the corresponding permission policies that were created earlier and a trust policy with an OIDC provider. The Operator roles differ from the account-wide roles in that they ultimately represent the pods that need access to AWS resources. Because a user cannot attach IAM roles to pods, they must create a trust policy with an OIDC provider so that the Operator, and therefore the pods, can access the roles they need. @@ -109,7 +109,7 @@ Once the user assigns the roles to the corresponding policy permissions, the fin image::cloud-experts-sts-explained_creation_flow_hcp.png[] -When a new role is needed, the workload currently using the Red Hat role will assume the role in the AWS account, obtain temporary credentials from AWS STS, and begin performing the actions using API calls within the user's AWS account as permitted by the assumed role's permissions policy. The credentials are temporary and have a maximum duration of one hour. +When a new role is needed, the workload currently using the Red{nbsp}Hat role will assume the role in the AWS account, obtain temporary credentials from AWS STS, and begin performing the actions using API calls within the user's AWS account as permitted by the assumed role's permissions policy. The credentials are temporary and have a maximum duration of one hour. 
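To make the temporary-credential flow above concrete: the role assumption that the cluster's automation software performs is equivalent to the following STS call. This is a sketch only; the role ARN, session name, and token path are illustrative, not values from this diff:

[source,terminal]
----
$ aws sts assume-role-with-web-identity \
    --role-arn arn:aws:iam::<aws_account_id>:role/<cluster_name>-openshift-image-registry-installer-cloud-credentials \
    --role-session-name <session_name> \
    --web-identity-token file:///var/run/secrets/openshift/serviceaccount/token
----

AWS STS returns a temporary access key, secret key, and session token; when they expire, the workload must assume the role again to continue working in the account.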
image::cloud-experts-sts-explained_highlevel.png[] @@ -119,4 +119,4 @@ image::cloud-experts-sts-explained_highlevel.png[] Operators use the following process to obtain the requisite credentials to perform their tasks. Each Operator is assigned an Operator role, a permissions policy, and a trust policy with an OIDC provider. The Operator will assume the role by passing a JSON web token that contains the role and a token file (`web_identity_token_file`) to the OIDC provider, which then authenticates the signed key with a public key. The public key is created during cluster creation and stored in an S3 bucket. The Operator then confirms that the subject in the signed token file matches the role in the role trust policy which ensures that the OIDC provider can only obtain the allowed role. The OIDC provider then returns the temporary credentials to the Operator so that the Operator can make AWS API calls. For a visual representation, see the following diagram: -image::cloud-experts-sts-explained_oidc_op_roles_hcp.png[] \ No newline at end of file +image::cloud-experts-sts-explained_oidc_op_roles_hcp.png[] diff --git a/welcome/index.adoc b/welcome/index.adoc index 29635c4804a2..249027e46e8b 100644 --- a/welcome/index.adoc +++ b/welcome/index.adoc @@ -72,17 +72,18 @@ Explore the following {product-title} installation tasks: - **xref:../installing/index.adoc#ocp-installation-overview[{product-title} installation overview]**: Depending on the platform, you can install {product-title} on installer-provisioned or user-provisioned infrastructure. The {product-title} installation program provides the flexibility to deploy {product-title} on a range of different platforms. -- **xref:../installing/installing_alibaba/preparing-to-install-on-alibaba.adoc#preparing-to-install-on-alibaba[Install a cluster on Alibaba]**: On Alibaba Cloud, you can install {product-title} on installer-provisioned infrastructure. This is currently a Technology Preview feature only. +// PR open https://github.com/openshift/openshift-docs/pull/77474 +//- **xref:../installing/installing_alibaba/installing-alibaba-assisted-installer[Installing a cluster on {alibaba} by using the Assisted Installer]**: On {alibaba}, you can install {product-title} by using the Assisted Installer. This is currently a Technology Preview feature only. -- **xref:../installing/installing_aws/preparing-to-install-on-aws.adoc#preparing-to-install-on-aws[Install a cluster on AWS]**: On AWS, you can install {product-title} on installer-provisioned infrastructure or user-provisioned infrastructure. +- **xref:../installing/installing_aws/preparing-to-install-on-aws.adoc#preparing-to-install-on-aws[Install a cluster on {aws-short}]**: On AWS, you can install {product-title} on installer-provisioned infrastructure or user-provisioned infrastructure. -- **xref:../installing/installing_azure/preparing-to-install-on-azure.adoc#preparing-to-install-on-azure[Install a cluster on Azure]**: On Microsoft Azure, you can install {product-title} on installer-provisioned infrastructure or user-provisioned infrastructure. +- **xref:../installing/installing_azure/preparing-to-install-on-azure.adoc#preparing-to-install-on-azure[Install a cluster on {azure-full}]**: On Microsoft Azure, you can install {product-title} on installer-provisioned infrastructure or user-provisioned infrastructure. 
-- **xref:../installing/installing_azure_stack_hub/preparing-to-install-on-azure-stack-hub.adoc#preparing-to-install-on-azure-stack-hub[Install a cluster on Azure Stack Hub]**: On Microsoft Azure Stack Hub, you can install {product-title} on installer-provisioned infrastructure or user-provisioned infrastructure.
+- **xref:../installing/installing_azure_stack_hub/preparing-to-install-on-azure-stack-hub.adoc#preparing-to-install-on-azure-stack-hub[Install a cluster on {azure-full} Stack Hub]**: On Microsoft Azure Stack Hub, you can install {product-title} on installer-provisioned infrastructure or user-provisioned infrastructure.
 
-- **xref:../installing/installing_on_prem_assisted/installing-on-prem-assisted.html#using-the-assisted-installer_installing-on-prem-assisted[Installing {product-title} with the Assisted Installer]**: The Assisted Installer is an installation solution that is provided on the Red Hat {hybrid-console}. The Assisted Installer supports installing an {product-title} cluster on many platforms, but with a focus on bare metal, Nutanix, and {vmw-full} infrastructures.
+- **xref:../installing/installing_on_prem_assisted/installing-on-prem-assisted.adoc#using-the-assisted-installer_installing-on-prem-assisted[Installing {product-title} with the Assisted Installer]**: The Assisted Installer is an installation solution that is provided on the Red Hat {hybrid-console}. The Assisted Installer supports installing an {product-title} cluster on multiple platforms.
 
-- **xref:../installing/installing_with_agent_based_installer/installing-with-agent-based-installer.html#installing-ocp-agent_installing-with-agent-based-installer[Installing {product-title} with the Agent-based Installer]**: You can use the Agent-based Installer to generate a bootable ISO image that contains the Assisted discovery agent, the Assisted Service, and all the other information required to deploy an {product-title} cluster. The Agent-based Installer leverages the advantages of the Assisted Installer in a disconnected environment
+- **xref:../installing/installing_with_agent_based_installer/installing-with-agent-based-installer.adoc#installing-ocp-agent_installing-with-agent-based-installer[Installing {product-title} with the Agent-based Installer]**: You can use the Agent-based Installer to generate a bootable ISO image that contains the Assisted discovery agent, the Assisted Service, and all the other information required to deploy an {product-title} cluster. The Agent-based Installer leverages the advantages of the Assisted Installer in a disconnected environment.
 
 - **xref:../installing/installing_bare_metal/preparing-to-install-on-bare-metal.adoc#preparing-to-install-on-bare-metal[Install a cluster on bare metal]**: On bare metal, you can install {product-title} on installer-provisioned infrastructure or user-provisioned infrastructure. If none of the available platform and cloud provider deployment options meet your needs, consider using bare metal user-provisioned infrastructure.
@@ -100,24 +101,18 @@ endif::openshift-origin[]
 
 - **Install a cluster on {oci-first}**: You can use the {ai-full} or the Agent-based Installer to install a cluster on {oci}. This means that you can run cluster workloads on infrastructure that supports dedicated, hybrid, public, and multiple cloud environments.
See xref:../installing/installing_oci/installing-oci-assisted-installer.adoc#installing-oci-assisted-installer[Installing a cluster on {oci-first-no-rt} by using the {ai-full}] and xref:../installing/installing_oci/installing-oci-agent-based-installer.adoc#installing-oci-agent-based-installer[Installing a cluster on {oci-first-no-rt} by using the Agent-based Installer].
 
-- **xref:../installing/installing_nutanix/preparing-to-install-on-nutanix.html#preparing-to-install-nutanix[Install a cluster on Nutanix]**: On Nutanix, you can install a cluster on your {product-title} on installer-provisioned infrastructure.
+- **xref:../installing/installing_nutanix/preparing-to-install-on-nutanix.adoc#preparing-to-install-nutanix[Install a cluster on Nutanix]**: On Nutanix, you can install an {product-title} cluster on installer-provisioned infrastructure.
 
 - **xref:../installing/installing_openstack/preparing-to-install-on-openstack.adoc#preparing-to-install-on-openstack[Install a cluster on {rh-openstack-first}]**: On {rh-openstack}, you can install {product-title} on installer-provisioned infrastructure or user-provisioned infrastructure.
 
 - **xref:../installing/installing_vsphere/ipi/installing-vsphere-installer-provisioned.adoc#installing-vsphere-installer-provisioned[Install a cluster on {vmw-full}]**: You can install {product-title} on supported versions of {vmw-short}.
 
-////
-You can configure an external load balancer for
-xref:../installing/installing_openstack/installing-openstack-load-balancing.adoc#installing-openstack-load-balancing[load balancing deployments on OpenStack].
-To troubleshoot OpenStack installation issues, you can
-xref:../installing/installing_openstack/installing-openstack-troubleshooting.adoc#installing-openstack-troubleshooting[view instance logs and ssh to an instance].
-////
 
 == Other cluster installer activities
 
 ifndef::openshift-origin[]
 - **Install a cluster in a restricted network**: If your cluster uses user-provisioned infrastructure on
-xref:../installing/installing_aws/upi/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[AWS],
+xref:../installing/installing_aws/upi/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[{aws-first}],
 xref:../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[{gcp-short}],
 xref:../installing/installing_vsphere/upi/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[{vmw-short}],
 xref:../installing/installing_ibm_cloud_public/installing-ibm-cloud-restricted.adoc#installing-ibm-cloud-restricted[{ibm-cloud-name}],
 xref:../installing/installing_ibm_z/preparing-to-install-on-ibm-z.adoc#preparing-to-install-on-ibm-z[{ibm-z-name} and {ibm-linuxone-name}],
 xref:../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[{ibm-power-name}], or
@@ -125,41 +120,41 @@ xref:../installing/installing_bare_metal/installing-restricted-networks-bare-met
 does not have full access to the internet, you must mirror the {product-title} installation images. To do this action, use one of the following methods, so that you can install a cluster in a restricted network.
*** xref:../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[Mirroring images for a disconnected installation] *** xref:../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation by using the oc-mirror plug-in] - endif::openshift-origin[] ifdef::openshift-origin[] - **Install a cluster in a restricted network**: If your cluster that uses user-provisioned infrastructure on -xref:../installing/installing_aws/upi/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[AWS], +xref:../installing/installing_aws/upi/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[{aws-first}], xref:../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[{gcp-short}], or xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal] does not have full access to the internet, then xref:../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[mirror the {product-title} installation images] and install a cluster in a restricted network. - endif::openshift-origin[] - **Install a cluster in an existing network**: If you use an existing Virtual Private Cloud (VPC) in -xref:../installing/installing_aws/ipi/installing-aws-vpc.adoc#installing-aws-vpc[AWS] or +xref:../installing/installing_aws/ipi/installing-aws-vpc.adoc#installing-aws-vpc[{aws-first}] or xref:../installing/installing_gcp/installing-gcp-vpc.adoc#installing-gcp-vpc[{gcp-short}] or an existing xref:../installing/installing_azure/installing-azure-vnet.adoc#installing-azure-vnet[VNet] on Microsoft Azure, you can install a cluster. Also consider xref:../installing/installing_gcp/installing-gcp-shared-vpc.adoc#installation-gcp-shared-vpc-prerequisites_installing-gcp-shared-vpc[Installing a cluster on {gcp-short} into a shared VPC] - **Install a private cluster**: If your cluster does not require external internet access, you can install a private cluster on -xref:../installing/installing_aws/ipi/installing-aws-private.adoc#installing-aws-private[AWS], -xref:../installing/installing_azure/installing-azure-private.adoc#installing-aws-private[Azure], +xref:../installing/installing_aws/ipi/installing-aws-private.adoc#installing-aws-private[{aws-first}], +xref:../installing/installing_azure/installing-azure-private.adoc#installing-aws-private[{azure-full}], xref:../installing/installing_gcp/installing-gcp-private.adoc#installing-gcp-private[{gcp-short}], or xref:../installing/installing_ibm_cloud_public/preparing-to-install-on-ibm-cloud.adoc#preparing-to-install-on-ibm-cloud[{ibm-cloud-name}]. Internet access is still required to access the cloud APIs and installation media. +- **xref:../installing/installing_bare_metal/installing-bare-metal.adoc#rhcos-install-iscsi-manual_installing-bare-metal[Installing RHCOS manually on an iSCSI boot device] and xref:../installing/installing_bare_metal/installing-bare-metal.adoc#rhcos-install-iscsi-ibft_installing-bare-metal[Installing RHCOS on an iSCSI boot device using iBFT]**: You can target iSCSI devices as the root disk for installation of {op-system}. Multipathing is also supported. 
+ - **xref:../installing/installing-troubleshooting.adoc#installing-troubleshooting[Check installation logs]**: Access installation logs to evaluate issues that occur during {product-title} installation. - **xref:../web_console/web-console.adoc#web-console[Access {product-title}]**: Use credentials output at the end of the installation process to log in to the {product-title} cluster from the command line or web console. - **xref:../storage/persistent_storage/persistent-storage-ocs.adoc#red-hat-openshift-data-foundation[Install Red Hat OpenShift Data Foundation]**: You can install {rh-storage-first} as an Operator to provide highly integrated and simplified persistent storage management for containers. -- **xref:../post_installation_configuration/coreos-layering.adoc#coreos-layering[{op-system-first} image layering]**: As a post-installation task, you can add new images on top of the base {op-system} image. This layering does not modify the base {op-system} image. Instead, the layering creates a custom layered image that includes all {op-system} functions and adds additional functions to specific nodes in the cluster. +- **xref:../machine_configuration/mco-coreos-layering.adoc#mco-coreos-layering[{op-system-first} image layering]**: As a post-installation task, you can add new images on top of the base {op-system} image. This layering does not modify the base {op-system} image. Instead, the layering creates a custom layered image that includes all {op-system} functions and adds additional functions to specific nodes in the cluster. endif::[] ifndef::openshift-rosa,openshift-dedicated,openshift-dpu,microshift[] @@ -174,13 +169,10 @@ Develop and deploy containerized applications with {product-title}. {product-tit - **xref:../applications/odc-viewing-application-composition-using-topology-view.adoc#odc-viewing-application-topology_viewing-application-composition-using-topology-view[Viewing application composition using the Topology view]**: Use the *Topology* view to visually interact with your applications, monitor status, connect and group components, and modify your code base. -- **xref:../applications/connecting_applications_to_services/understanding-service-binding-operator.adoc#understanding-service-binding-operator[Understanding Service Binding Operator]**: With the Service Binding Operator, an application developer can bind workloads with Operator-managed backing services by automatically collecting and sharing binding data with the workloads. The Service Binding Operator improves the development lifecycle with a consistent and declarative service binding method that prevents discrepancies in cluster environments. - - **link:https://docs.openshift.com/pipelines/latest/create/creating-applications-with-cicd-pipelines.html#creating-applications-with-cicd-pipelines[Create CI/CD Pipelines]**: Pipelines are serverless, cloud-native, continuous integration and continuous deployment systems that run in isolated containers. Pipelines use standard Tekton custom resources to automate deployments and are designed for decentralized teams that work on microservice-based architecture. ifdef::openshift-enterprise,openshift-webscale,openshift-origin[] - - **link:https://docs.openshift.com/gitops/latest/understanding_openshift_gitops/about-redhat-openshift-gitops.html#about-redhat-openshift-gitops[Manage your infrastructure and application configurations]**: GitOps is a declarative way to implement continuous deployment for cloud native applications. 
GitOps defines infrastructure and application definitions as code. GitOps uses this code to manage multiple workspaces and clusters to simplify the creation of infrastructure and application configurations. GitOps also handles and automates complex deployments at a fast pace, which saves time during deployment and release cycles. - **xref:../applications/working_with_helm_charts/configuring-custom-helm-chart-repositories.adoc#installing-a-helm-chart-on-an-openshift-cluster_configuring-custom-helm-chart-repositories[Deploy Helm charts]**: @@ -252,16 +244,7 @@ Manage machines, provide services to users, and follow monitoring and logging re - **Manage xref:../security/certificates/replacing-default-ingress-certificate.adoc#replacing-default-ingress[ingress], xref:../security/certificates/api-server.adoc#api-server-certificates[API server], and xref:../security/certificates/service-serving-certificate.adoc#add-service-serving[service] certificates**: {product-title} creates certificates by default for the Ingress Operator, the API server, and for services needed by complex middleware applications that require encryption. You might need to change, add, or rotate these certificates. - **xref:../networking/understanding-networking.adoc#understanding-networking[Manage networking]**: The cluster network in {product-title} is managed by the xref:../networking/cluster-network-operator.adoc#cluster-network-operator[Cluster Network Operator] (CNO). The CNO uses `iptables` rules in xref:../networking/openshift_sdn/configuring-kube-proxy.adoc#configuring-kube-proxy[kube-proxy] to direct traffic between nodes and pods running on those nodes. The Multus Container Network Interface adds the capability to attach xref:../networking/multiple_networks/understanding-multiple-networks.adoc#understanding-multiple-networks[multiple network interfaces] to a pod. By using -xref:../networking/openshift_network_security/network_policy/about-network-policy.adoc#about-network-policy[network policy] features, you can isolate your pods or permit selected traffic. - -- **xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Manage storage]**: With {product-title}, a cluster administrator can configure persistent storage by using -xref:../storage/persistent_storage/persistent-storage-ocs.adoc#red-hat-openshift-data-foundation[Red Hat OpenShift Data Foundation], -xref:../storage/persistent_storage/persistent-storage-aws.adoc#persistent-storage-using-aws-ebs[AWS Elastic Block Store], -xref:../storage/persistent_storage/persistent-storage-nfs.adoc#persistent-storage-using-nfs[NFS], -xref:../storage/persistent_storage/persistent-storage-iscsi.adoc#persistent-storage-using-iscsi[iSCSI], -xref:../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-using-csi[Container Storage Interface (CSI)], -and more. -You can xref:../storage/expanding-persistent-volumes.adoc#expanding-persistent-volumes[expand persistent volumes], configure xref:../storage/dynamic-provisioning.adoc#dynamic-provisioning[dynamic provisioning], and use CSI to xref:../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-using-csi[configure], xref:../storage/container_storage_interface/persistent-storage-csi-cloning.adoc#persistent-storage-csi-cloning[clone], and use xref:../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots[snapshots] of persistent storage. 
+xref:../networking/network_security/network_policy/about-network-policy.adoc#about-network-policy[network policy] features, you can isolate your pods or permit selected traffic. - **xref:../operators/understanding/olm-understanding-operatorhub.adoc#olm-understanding-operatorhub[Manage Operators]**: Lists of Red Hat, ISV, and community Operators can be reviewed by cluster administrators and xref:../operators/admin/olm-adding-operators-to-cluster.adoc#olm-adding-operators-to-a-cluster[installed on their clusters]. After you install them, you can xref:../operators/user/olm-creating-apps-from-installed-operators.adoc#olm-creating-apps-from-installed-operators[run], xref:../operators/admin/olm-upgrading-operators.adoc#olm-upgrading-operators[upgrade], back up, or otherwise manage the Operator on your cluster. @@ -278,11 +261,11 @@ endif::openshift-enterprise,openshift-webscale,openshift-origin[] - **xref:../applications/pruning-objects.adoc#pruning-objects[Prune and reclaim resources]**: Reclaim space by pruning unneeded Operators, groups, deployments, builds, images, registries, and cron jobs. - **xref:../scalability_and_performance/recommended-performance-scale-practices/recommended-infrastructure-practices.adoc#scaling-cluster-monitoring-operator[Scale] and xref:../scalability_and_performance/using-node-tuning-operator.adoc#using-node-tuning-operator[tune] clusters**: Set cluster limits, tune nodes, scale cluster monitoring, and optimize networking, storage, and routes for your environment. - -- **xref:../updating/understanding_updates/intro-to-updates.html[Update a cluster]**: +// Added context here. +- **xref:../updating/understanding_updates/intro-to-updates.adoc#understanding-openshift-updates[Update a cluster]**: Use the Cluster Version Operator (CVO) to upgrade your {product-title} cluster. If an update is available from the OpenShift Update Service (OSUS), you apply that cluster update from the {product-title} xref:../updating/updating_a_cluster/updating-cluster-web-console.adoc#updating-cluster-web-console[web console] or the xref:../updating/updating_a_cluster/updating-cluster-cli.adoc#updating-cluster-cli[OpenShift CLI] (`oc`). -- **xref:../updating/updating_a_cluster/updating_disconnected_cluster/index.html[Using the OpenShift Update Service in a disconnected environment]**: You can use the OpenShift Update Service for recommending {product-title} updates in disconnected environments. +- **xref:../updating/updating_a_cluster/updating_disconnected_cluster/index.adoc#about-restricted-network-updates[Using the OpenShift Update Service in a disconnected environment]**: You can use the OpenShift Update Service for recommending {product-title} updates in disconnected environments. - **xref:../nodes/clusters/nodes-cluster-worker-latency-profiles.adoc#nodes-cluster-worker-latency-profiles[Improving cluster stability in high latency environments by using worker latency profiles]**: If your network has latency issues, you can use one of three worker latency profiles to help ensure that your control plane does not accidentally evict pods in case it cannot reach a worker node. You can configure or modify the profile at any time during the life of the cluster. 
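The update entries above map to a small set of `oc` interactions. As a sketch, with an illustrative target version:

[source,terminal]
----
$ oc adm upgrade <1>
$ oc adm upgrade --to=4.16.2 <2>
$ oc adm upgrade --to-latest=true <3>
----
<1> Shows the current version, channel, and the updates that the OpenShift Update Service recommends.
<2> Updates the cluster to a specific recommended version.
<3> Updates the cluster to the latest recommended version instead of a pinned one.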
@@ -304,6 +287,25 @@ After configuring monitoring, use the web console to access xref:../observabilit - **xref:../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring_about-remote-health-monitoring[Remote health monitoring]**: {product-title} collects anonymized aggregated information about your cluster. By using Telemetry and the Insights Operator, this data is received by Red Hat and used to improve {product-title}. You can view the xref:../support/remote_health_monitoring/showing-data-collected-by-remote-health-monitoring.adoc#showing-data-collected-by-remote-health-monitoring_showing-data-collected-by-remote-health-monitoring[data collected by remote health monitoring]. - **xref:../observability/power_monitoring/power-monitoring-overview.adoc#power-monitoring-overview[{PM-title-c} (Technology Preview)]**: You can use {PM-title} to monitor the power usage and identify power-consuming containers running in an {product-title} cluster. {PM-shortname-c} collects and exports energy-related system statistics from various components, such as CPU and DRAM. {PM-shortname-c} provides granular power consumption data for Kubernetes pods, namespaces, and nodes. + +== Storage activities + +- **xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Manage storage]**: With {product-title}, a cluster administrator can configure persistent storage by using +xref:../storage/persistent_storage/persistent-storage-ocs.adoc#red-hat-openshift-data-foundation[Red Hat OpenShift Data Foundation], +xref:../storage/persistent_storage/persistent-storage-aws.adoc#persistent-storage-using-aws-ebs[{aws-short} Elastic Block Store], +xref:../storage/persistent_storage/persistent-storage-nfs.adoc#persistent-storage-using-nfs[NFS], +xref:../storage/persistent_storage/persistent-storage-iscsi.adoc#persistent-storage-using-iscsi[iSCSI], +xref:../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-using-csi[Container Storage Interface (CSI)], +and more. +You can xref:../storage/expanding-persistent-volumes.adoc#expanding-persistent-volumes[expand persistent volumes], configure xref:../storage/dynamic-provisioning.adoc#dynamic-provisioning[dynamic provisioning], and use CSI to xref:../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-using-csi[configure], xref:../storage/container_storage_interface/persistent-storage-csi-cloning.adoc#persistent-storage-csi-cloning[clone], and use xref:../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots[snapshots] of persistent storage. + +- **xref:../storage/container_storage_interface/persistent-storage-csi-smb-cifs.adoc#persistent-storage-csi-smb-cifs[Persistent storage using CIFS/SMB CSI Driver Operator (Technology Preview)]**: {product-title} is capable of provisioning persistent volumes (PVs) with a Container Storage Interface (CSI) driver for the Common Internet File System (CIFS) dialect/Server Message Block (SMB) protocol. The CIFS/SMB CSI Driver Operator that manages this driver is in Technology Preview status. + +- **xref:../storage/container_storage_interface/persistent-storage-csi-snapshots.adoc#persistent-storage-csi-snapshots-overview_persistent-storage-csi-snapshots[Changing vSphere CSI maximum number of snapshots]**: The default maximum number of snapshots in {vmw-first} Container Storage Interface (CSI) is 3 per volume. 
In {product-title} {product-version}, you can now increase this limit to a maximum of 32 snapshots per volume. You also have granular control of the maximum number of snapshots for vSAN and Virtual Volume datastores.
+
+- **xref:../storage/container_storage_interface/persistent-storage-csi.adoc#persistent-storage-csi[Volume cloning supported for Azure File (Technology Preview)]**: {product-title} {product-version} introduces volume cloning for the Microsoft Azure File Container Storage Interface (CSI) Driver Operator as a Technology Preview feature.
+
+- **xref:../storage/understanding-persistent-storage.adoc#pv-access-modes_understanding-persistent-storage[RWOP with SELinux context mount]**: In {product-title} {product-version}, the `ReadWriteOncePod` (RWOP) access mode changes from Technology Preview to generally available. RWOP can be used only in a single pod on a single node. If the driver enables it, RWOP uses the SELinux context mount set in the PodSpec or container, which allows the driver to mount the volume directly with the correct SELinux labels.
 endif::openshift-enterprise,openshift-webscale,openshift-origin[]
 
 ifdef::openshift-dedicated[]
@@ -317,7 +319,6 @@ While cluster maintenance and host configuration is performed by the Red Hat Sit
 - *Manage nodes*: Learn to manage nodes, including configuring machine pools and autoscaling.
 endif::openshift-dedicated[]
 endif::openshift-enterprise,openshift-webscale,openshift-origin[]
-
 endif::openshift-rosa[]
 
 ifdef::openshift-enterprise[]
@@ -328,11 +329,10 @@ ifdef::openshift-enterprise[]
 ** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.10/html/clusters/cluster_mce_overview#configuring-hosting-service-cluster-configure-bm[Configuring hosted control plane clusters on bare metal]
 ** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.10/html/clusters/cluster_mce_overview#hosted-control-planes-manage-kubevirt[Managing hosted control plane clusters on OpenShift Virtualization]
 
-* **Technology Preview features**: {hcp-capital} remains available as a Technology Preview feature on the Amazon Web Services, {ibm-power-name}, and {ibm-z-name} platforms. You can now provision a hosted control plane cluster by using the non bare metal agent machines. For more information, see the following documentation:
+* **Technology Preview features**: {hcp-capital} remains available as a Technology Preview feature on the {aws-first}, {ibm-power-name}, and {ibm-z-name} platforms. You can now provision a hosted control plane cluster by using non bare metal agent machines.
For more information, see the following documentation: -** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.10/html/clusters/cluster_mce_overview#hosting-service-cluster-configure-aws[Configuring the hosting cluster on AWS (Technology Preview)] +** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.10/html/clusters/cluster_mce_overview#hosting-service-cluster-configure-aws[Configuring the hosting cluster on {aws-short} (Technology Preview)] ** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.10/html/clusters/cluster_mce_overview#config-hosted-service-ibmpower[Configuring the hosting cluster on a 64-bit x86 {product-title} cluster to create {hcp} for {ibm-power-name} compute nodes (Technology Preview)] ** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.10/html/clusters/cluster_mce_overview#configuring-hosting-service-cluster-ibmz[Configuring the hosted cluster on 64-bit x86 bare metal for {ibm-z-name} compute nodes (Technology Preview)] ** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.10/html/clusters/cluster_mce_overview#configuring-hosting-service-cluster-configure-agent-non-bm[Configuring hosted control plane clusters using non bare metal agent machines (Technology Preview)] - endif::openshift-enterprise[] diff --git a/welcome/learn_more_about_openshift.adoc b/welcome/learn_more_about_openshift.adoc index c325385c7b7c..cd0d2a02d12a 100644 --- a/welcome/learn_more_about_openshift.adoc +++ b/welcome/learn_more_about_openshift.adoc @@ -46,7 +46,7 @@ Use the following sections to find content to help you learn about and use {prod | xref:../support/getting-support.adoc#getting-support[Getting Support] | xref:../architecture/architecture.adoc#architecture[Architecture] -| xref:../post_installation_configuration/machine-configuration-tasks.adoc#post-install-machine-configuration-tasks[Post installation configuration] +| xref:../machine_configuration/index.adoc#machine-config-overview[Machine configuration overview] | xref:../observability/logging/cluster-logging.adoc#cluster-logging[Logging] | link:https://access.redhat.com/articles/4217411[OpenShift Knowledgebase articles] diff --git a/welcome/oke_about.adoc b/welcome/oke_about.adoc index fc91ae983ca1..d9136b3d5c82 100644 --- a/welcome/oke_about.adoc +++ b/welcome/oke_about.adoc @@ -297,7 +297,6 @@ s| Feature s| {oke} s| {product-title} s| Operator name | Developer Application Catalog | Not Included | Included | N/A | Source to Image and Builder Automation (Tekton) | Not Included | Included | N/A | OpenShift Service Mesh | Not Included | Included | OpenShift Service Mesh Operator -| Service Binding Operator | Not Included | Included | Service Binding Operator s| Feature s| {oke} s| {product-title} s| Operator name | Red Hat OpenShift Serverless | Not Included | Included | OpenShift Serverless Operator | Web Terminal provided by Red Hat | Not Included | Included | Web Terminal Operator diff --git a/windows_containers/enabling-windows-container-workloads.adoc b/windows_containers/enabling-windows-container-workloads.adoc index 3ceddb629267..6688e173c5a8 100644 --- a/windows_containers/enabling-windows-container-workloads.adoc +++ b/windows_containers/enabling-windows-container-workloads.adoc @@ -46,6 +46,12 @@ include::modules/installing-wmco-using-cli.adoc[leveloffset=+2] 
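The `configuring-secret-for-wmco` module included just below covers the private key that WMCO uses to access Windows instances. A sketch of that step, assuming the documented secret name and Operator namespace; the key path is a placeholder:

[source,terminal]
----
$ oc create secret generic cloud-private-key \
    --from-file=private-key.pem=${HOME}/.ssh/<key>.pem \
    -n openshift-windows-machine-config-operator
----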
include::modules/configuring-secret-for-wmco.adoc[leveloffset=+1]
 
+include::modules/wmco-cluster-wide-proxy.adoc[leveloffset=+1]
+
+.Additional resources
+
+* xref:../networking/enable-cluster-wide-proxy.adoc#enable-cluster-wide-proxy[Configuring the cluster-wide proxy]
+
 [role="_additional-resources"]
 == Additional resources
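For orientation, the cluster-wide proxy that the new `wmco-cluster-wide-proxy` module and its additional-resources link refer to is the singleton `Proxy` object from the `config.openshift.io/v1` API. A trimmed sketch; all values are placeholders:

[source,yaml]
----
apiVersion: config.openshift.io/v1
kind: Proxy
metadata:
  name: cluster <1>
spec:
  httpProxy: http://<username>:<password>@<proxy.example.com>:3128
  httpsProxy: https://<username>:<password>@<proxy.example.com>:3128
  noProxy: .example.com <2>
  trustedCA:
    name: user-ca-bundle <3>
----
<1> `cluster` is the only Proxy object and is cluster-scoped.
<2> Comma-separated list of hostnames, domains, and CIDRs to exclude from proxying.
<3> A config map in the `openshift-config` namespace that holds any additional CA certificates the proxy requires.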