diff --git a/.vscode/settings.json b/.vscode/settings.json
index 9e26dfeeb6..3968f72341 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1 +1,5 @@
-{}
\ No newline at end of file
+{
+ "cSpell.words": [
+ "preconfigured"
+ ]
+}
\ No newline at end of file
diff --git a/artifacts/attributes.adoc b/artifacts/attributes.adoc
index 64a9378f6a..2f53795189 100644
--- a/artifacts/attributes.adoc
+++ b/artifacts/attributes.adoc
@@ -10,10 +10,13 @@
:product: Red Hat Developer Hub
:product-short: Developer Hub
:product-very-short: RHDH
-:product-version: 1.5
-:product-bundle-version: 1.5.0
-:product-chart-version: 1.5.0
-:product-backstage-version: 1.35.0
+:product-local: Red Hat Developer Hub Local
+:product-local-very-short: RHDH Local
+:product-version: 1.6
+:product-bundle-version: 1.6.0
+:product-chart-version: 1.6.0
+:product-backstage-version: 1.36.1
+:product-version-next: 1.7
:product-custom-resource-type: Backstage
:rhdeveloper-name: Red Hat Developer
:rhel: Red Hat Enterprise Linux
@@ -47,7 +50,7 @@
:rhsso: RHSSO
:rhbk-brand-name: Red Hat Build of Keycloak
:rhbk: RHBK
-:keycloak-version: 24.0
+:keycloak-version: 26.0
// RHTAP information
:rhtap-version: 1.3
@@ -140,7 +143,7 @@
:extend-category-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/#Extend
:introduction-to-plugins-book-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/introduction_to_plugins/index
:introduction-to-plugins-book-title: Introduction to plugins
-:configuring-dynamic-plugins-book-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/introduction_to_plugins/index
+:configuring-dynamic-plugins-book-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/configuring_dynamic_plugins/index
:configuring-dynamic-plugins-book-title: Configuring dynamic plugins
:installing-and-viewing-plugins-book-url: https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/installing_and_viewing_plugins_in_red_hat_developer_hub/index
:installing-and-viewing-plugins-book-title: Installing and viewing plugins in {product}
diff --git a/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-admin.adoc b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-admin.adoc
index d456e06c28..ece6fdd6ac 100644
--- a/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-admin.adoc
+++ b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-admin.adoc
@@ -143,6 +143,64 @@ When using client credentials, the access type must be set to `confidential` and
* `query-users`
* `view-users`
+== Metrics
+
+The Keycloak backend plugin supports link:https://opentelemetry.io/[OpenTelemetry] metrics that you can use to monitor fetch operations and diagnose potential issues.
+
+=== Available Counters
+
+.Keycloak metrics
+[cols="60%,40%", frame="all", options="header"]
+|===
+|Metric Name |Description
+| `backend_keycloak_fetch_task_failure_count_total` | Counts fetch task failures where no data was returned due to an error.
+| `backend_keycloak_fetch_data_batch_failure_count_total` | Counts partial data batch failures. Even if some batches fail, the plugin continues fetching others.
+|===
+
+=== Labels
+
+All counters include the `taskInstanceId` label, which uniquely identifies each scheduled fetch task. You can use this label to trace failures back to individual task executions.
+
+Users can enter queries in the Prometheus UI or Grafana to explore and manipulate metric data.
+
+In the following examples, Prometheus Query Language (PromQL) expressions return the number of backend failures.
+
+.Example to get the number of backend failures associated with a `taskInstanceId`
+[source,subs="+attributes,+quotes"]
+----
+backend_keycloak_fetch_data_batch_failure_count_total{taskInstanceId="df040f82-2e80-44bd-83b0-06a984ca05ba"} 1
+----
+
+.Example to get the number of backend failures during the last hour
+
+[source,subs="+attributes,+quotes"]
+----
+sum(backend_keycloak_fetch_data_batch_failure_count_total) - sum(backend_keycloak_fetch_data_batch_failure_count_total offset 1h)
+----
+
+[NOTE]
+====
+PromQL supports arithmetic operations, comparison operators, logical/set operations, aggregation, and various functions. Users can combine these features to analyze time-series data effectively.
+
+Additionally, the results can be visualized using Grafana.
+====
+
+// === Use Case Example
+
+// Imagine your Keycloak instance is under-provisioned (e.g., low CPU/RAM limits), and the plugin is configured to send many parallel API requests.
+// This could cause request timeouts or throttling. The metrics described above can help detect such behavior early, allowing administrators to:
+
+// - Tune the plugin configuration (e.g., reduce parallelism)
+// - Increase resources on the Keycloak server
+// - Investigate network or permission issues
+
+=== Exporting Metrics
+
+You can export metrics using any OpenTelemetry-compatible backend, such as *Prometheus*.
+
+See the link:https://backstage.io/docs/tutorials/setup-opentelemetry[Backstage OpenTelemetry setup guide] for integration instructions.
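+
+For example, the following is a minimal Prometheus scrape configuration sketch. It assumes that the OpenTelemetry Prometheus exporter in your {product-short} deployment serves metrics on port `9464` at the default `/metrics` path; adjust the target to match your environment.
+
+[source,yaml]
+----
+# prometheus.yml (sketch): scrape the {product-short} backend metrics endpoint
+scrape_configs:
+  - job_name: rhdh-backend
+    metrics_path: /metrics                       # default path exposed by the Prometheus exporter
+    static_configs:
+      - targets:
+          - my-rhdh-backend.example.com:9464     # hypothetical host and port
+----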
+
== Limitations
If you have self-signed or corporate certificate issues, you can set the following environment variable before starting {product-short}:
diff --git a/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-readme.adoc b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-readme.adoc
index 263061178e..754855509a 100644
--- a/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-readme.adoc
+++ b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-readme.adoc
@@ -6,9 +6,7 @@ The Keycloak backend plugin, which integrates Keycloak into {product-short}, has
* Synchronization of Keycloak users in a realm.
* Synchronization of Keycloak groups and their users in a realm.
-== For administrators
-
-=== Installation
+== Installation
The Keycloak plugin is pre-loaded in {product-short} with basic configuration properties. To enable it, set the `disabled` property to `false` as follows:
@@ -23,7 +21,7 @@ global:
disabled: false
----
-=== Basic configuration
+== Basic configuration
To enable the Keycloak plugin, you must set the following environment variables:
* `KEYCLOAK_BASE_URL`
@@ -36,7 +34,7 @@ To enable the Keycloak plugin, you must set the following environment variables:
* `KEYCLOAK_CLIENT_SECRET`
-=== Advanced configuration
+== Advanced configuration
.Schedule configuration
You can configure a schedule in the `app-config.yaml` file, as follows:
@@ -140,7 +138,7 @@ When using client credentials, the access type must be set to `confidential` and
* `query-users`
* `view-users`
-=== Limitations
+== Limitations
If you have self-signed or corporate certificate issues, you can set the following environment variable before starting {product-short}:
@@ -152,29 +150,3 @@ If you have self-signed or corporate certificate issues, you can set the followi
The solution of setting the environment variable is not recommended.
====
-== For users
-
-=== Import of users and groups in {product-short} using the Keycloak plugin
-
-After configuring the plugin successfully, the plugin imports the users and groups each time when started.
-
-[NOTE]
-====
-If you set up a schedule, users and groups will also be imported.
-====
-
-After the first import is complete, you can select *User* to list the users from the catalog page:
-
-image::rhdh-plugins-reference/users.jpg[catalog-list]
-
-You can see the list of users on the page:
-
-image::rhdh-plugins-reference/user-list.jpg[user-list]
-
-When you select a user, you can see the information imported from Keycloak:
-
-image::rhdh-plugins-reference/user2.jpg[user-profile]
-
-You can also select a group, view the list, and select or view the information imported from Keycloak for a group:
-
-image::rhdh-plugins-reference/group1.jpg[group-profile]
diff --git a/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-user.adoc b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-user.adoc
index cf91a17ba0..5a427b6ae5 100644
--- a/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-user.adoc
+++ b/artifacts/rhdh-plugins-reference/keycloak/keycloak-plugin-user.adoc
@@ -15,18 +15,11 @@ After configuring the plugin successfully, the plugin imports the users and grou
If you set up a schedule, users and groups will also be imported.
====
-After the first import is complete, you can select *User* to list the users from the catalog page:
-
-image::rhdh-plugins-reference/users.jpg[catalog-list]
-
-You can see the list of users on the page:
-
-image::rhdh-plugins-reference/user-list.jpg[user-list]
-
-When you select a user, you can see the information imported from Keycloak:
-
-image::rhdh-plugins-reference/user2.jpg[user-profile]
-
-You can also select a group, view the list, and select or view the information imported from Keycloak for a group:
-
-image::rhdh-plugins-reference/group1.jpg[group-profile]
+.Procedure
+. In {product}, go to the *Catalog* page.
+. Select *User* from the entity type filter to display the list of imported users.
+. Browse the list of users displayed on the page.
+. Select a user to view detailed information imported from Keycloak.
+. To view groups, select *Group* from the entity type filter.
+. Browse the list of groups shown on the page.
+. From the list of groups, select a group to view the information imported from Keycloak.
diff --git a/assemblies/assembly-about-rhdh.adoc b/assemblies/assembly-about-rhdh.adoc
index 73ab09bd98..e3d645d0df 100644
--- a/assemblies/assembly-about-rhdh.adoc
+++ b/assemblies/assembly-about-rhdh.adoc
@@ -12,11 +12,18 @@ endif::[]
:context: about-rhdh
-{product} ({product-very-short}) is an enterprise-grade internal developer portal designed to simplify and streamline software development processes. Combined with {ocp-brand-name}, {product-very-short} empowers platform engineering teams to create customized portals that improve developer productivity, accelerate onboarding, and enable faster application delivery. By reducing friction and complexity, {product-very-short} allows developers to focus on writing high-quality code while adhering to enterprise-class best practices.
+{product} ({product-very-short}) is an enterprise-grade internal developer portal (IDP) that helps simplify and accelerate software delivery. It provides a customizable web-based interface that centralizes access to key development resources, including source code repositories, CI and CD pipelines, APIs, documentation, and runtime environments.
-{product-very-short} integrates software templates, pre-designed solutions, and dynamic plugins into a centralized platform, providing tailored solutions for operations and development teams in a unified environment.
+{product} is designed for cloud-native environments, including supported Kubernetes platforms, {ocp-brand-name}, and hybrid infrastructure. By consolidating tools and standardizing development workflows, it helps teams deliver software faster with more consistency.
+
+Designed for enterprise-scale software teams, {product-very-short} helps developers focus on building software rather than managing tools. Developers can onboard quickly, create environments, and integrate with existing systems. With enterprise-grade security, role-based access control, and 24x7 support, teams stay productive while meeting compliance and reliability standards.
+
+include::modules/about/con_understanding-internal-developer-platforms.adoc[leveloffset=+1]
+
+// {product-very-short} integrates software templates, pre-designed solutions, and dynamic plugins into a centralized platform, providing tailored solutions for operations and development teams in a unified environment.
+
+// include::modules/about/con-benefits-of-rhdh.adoc[leveloffset=+1]
-include::modules/about/con-benefits-of-rhdh.adoc[leveloffset=+1]
include::modules/about/con-integrations-in-rhdh.adoc[leveloffset=+1]
include::modules/about/ref-supported-platforms.adoc[leveloffset=+1]
include::modules/about/ref-rhdh-sizing.adoc[leveloffset=+1]
diff --git a/assemblies/assembly-about-software-catalogs.adoc b/assemblies/assembly-about-software-catalogs.adoc
new file mode 100644
index 0000000000..605f8eb12d
--- /dev/null
+++ b/assemblies/assembly-about-software-catalogs.adoc
@@ -0,0 +1,30 @@
+:_mod-docs-content-type: ASSEMBLY
+:context: about-software-catalogs
+[id="{context}"]
+= About Software Catalogs
+
+The {product} Software Catalog is a centralized system that gives you visibility into all the software across your ecosystem, including services, websites, libraries, and data pipelines. You can use it to view ownership details and metadata for each component in one place.
+
+The metadata for the components in your Software Catalog is stored as YAML files that live alongside your code in your version control system. The version control repositories can include one or many metadata files. The Software Catalog organizes items as entities, which include Components, Resources, APIs, and other related types. Each entity includes associated metadata such as its owner, type, and other relevant details.
+
+By storing metadata in YAML files alongside the code, you allow {product} to process and display this information through a clear, visual interface. With the Software Catalog, you can manage and maintain your software, stay aware of all software available in your ecosystem, and take ownership of your services and tools.
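+
+For example, the following is a minimal sketch of an entity descriptor file, typically named `catalog-info.yaml`, that registers a single Component. The component name, description, and owner are illustrative.
+
+[source,yaml]
+----
+apiVersion: backstage.io/v1alpha1
+kind: Component
+metadata:
+  name: my-service                      # hypothetical component name
+  description: Example backend service
+spec:
+  type: service
+  lifecycle: production
+  owner: team-a                         # hypothetical owning team
+----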
+
+//[Add a Software Catalog screenshot of the latest build]
+
+The *Overview* page for a component provides key information such as links to the source code, documentation, dependencies, and ownership details. You can customize this page with plugins to suit specific needs.
+
+//[Add any Component screenshot of the latest build]
+
+include::modules/software-catalogs/proc-adding-new-components-to-the-rhdh-instance.adoc[leveloffset=+1]
+
+include::modules/software-catalogs/proc-creating-new-components-in-the-rhdh-instance.adoc[leveloffset=+2]
+
+include::modules/software-catalogs/proc-registering-components-manually-in-the-rhdh-instance.adoc[leveloffset=+2]
+
+include::modules/software-catalogs/proc-updating-components-in-the-software-catalog.adoc[leveloffset=+1]
+
+include::modules/software-catalogs/proc-searching-and-filter-software-catalogs.adoc[leveloffset=+1]
+
+include::modules/software-catalogs/proc-viewing-software-catalog-yaml.adoc[leveloffset=+1]
+
+include::modules/software-catalogs/proc-starring-components-in-the-software-catalog.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/assemblies/assembly-audit-log.adoc b/assemblies/assembly-audit-log.adoc
index b6940d3f7b..baeb4476f0 100644
--- a/assemblies/assembly-audit-log.adoc
+++ b/assemblies/assembly-audit-log.adoc
@@ -30,17 +30,9 @@ Audit logs are not forwarded to the internal log store by default because this d
.Additional resources
* For more information about logging in {ocp-short}, see link:https://docs.openshift.com/container-platform/latest/observability/logging/cluster-logging.html[About Logging]
-* For a complete list of fields that a {product-short} audit log can include, see xref:ref-audit-log-fields.adoc_{context}[]
-* For a list of scaffolder events that a {product-short} audit log can include, see xref:ref-audit-log-scaffolder-events.adoc_{context}[]
include::modules/observe/con-audit-log-config.adoc[]
include::modules/observe/proc-forward-audit-log-splunk.adoc[leveloffset=+2]
-include::modules/observe/proc-audit-log-view.adoc[]
-
-include::modules/observe/ref-audit-log-fields.adoc[leveloffset=+2]
-
-include::modules/observe/ref-audit-log-scaffolder-events.adoc[leveloffset=+2]
-
-include::modules/observe/ref-audit-log-catalog-events.adoc[leveloffset=+2]
+include::modules/observe/proc-audit-log-view.adoc[]
\ No newline at end of file
diff --git a/assemblies/assembly-authenticating-with-rhbk.adoc b/assemblies/assembly-authenticating-with-rhbk.adoc
index fd2439dbda..6b124ba27d 100644
--- a/assemblies/assembly-authenticating-with-rhbk.adoc
+++ b/assemblies/assembly-authenticating-with-rhbk.adoc
@@ -1,11 +1,6 @@
[id="assembly-authenticating-with-rhbk"]
= Authenticating with {rhbk-brand-name} ({rhbk})
-[NOTE]
-====
-{rhsso} 7.6 is deprecated as an authentication provider. You can continue using {rhsso} until the end of its maintenance support. For more information, see link:https://access.redhat.com/support/policy/updates/jboss_notes#p_sso[{rhsso} lifecycle dates]. As an alternative, consider migrating to {rhbk-brand-name} ({rhbk}).
-====
-
To authenticate users with {rhbk-brand-name} ({rhbk}):
. xref:enabling-authentication-with-rhbk[Enable the OpenID Connect (OIDC) authentication provider in RHDH].
diff --git a/assemblies/assembly-configuring-a-floating-action-button.adoc b/assemblies/assembly-configuring-a-floating-action-button.adoc
index 95d6156ee7..52e5fc37f3 100644
--- a/assemblies/assembly-configuring-a-floating-action-button.adoc
+++ b/assemblies/assembly-configuring-a-floating-action-button.adoc
@@ -5,4 +5,6 @@
You can use the floating action button plugin to configure any action as a floating button in the {product-short} instance. The floating action button plugin is enabled by default. You can also configure floating action buttons to display as submenu options within the main floating action button by assigning the floating action buttons to the same `slot` field of your `dynamic-plugins.yaml` file.
-include::modules/configuring-a-floating-action-button/proc-configuring-floating-action-button-as-a-dynamic-plugin.adoc[leveloffset=+1]
\ No newline at end of file
+include::modules/configuring-a-floating-action-button/proc-configuring-floating-action-button-as-a-dynamic-plugin.adoc[leveloffset=+1]
+
+include::modules/configuring-a-floating-action-button/ref-floating-action-button-parameters.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/assemblies/assembly-configuring-authorization-in-rhdh.adoc b/assemblies/assembly-configuring-authorization-in-rhdh.adoc
index 217a146e57..cfe7b7f6bb 100644
--- a/assemblies/assembly-configuring-authorization-in-rhdh.adoc
+++ b/assemblies/assembly-configuring-authorization-in-rhdh.adoc
@@ -1,26 +1,18 @@
[id='configuring-authorization-in-rhdh']
= Configuring authorization in {product}
-In link:{authorization-book-url}[{authentication-book-title}], you learnt how to authenticate users to {product}.
-{product-short} knowns who the users are.
+Administrators can authorize users to perform actions and define what users can do in {product-short}.
-In this book, learn how to authorize users to perform actions in {product-short}.
-Define what users can do in {product-short}.
+Role-based access control (RBAC) is a security concept that controls access to resources in a system by specifying a mapping between the users of the system and the actions that they can perform on its resources.
+You can use RBAC to define roles with specific permissions and then assign the roles to users and groups.
-Role-Based Access Control (RBAC) is a security concept that controls access to resources in a system, and specifies a mapping between users of the system, and the actions they can perform on resources in the system.
-You define roles with specific permissions, and then assign the roles to users and groups.
+RBAC on {product-short} is built on top of the Permissions framework, which defines RBAC policies in code. Rather than defining policies in code, you can use the {product-short} RBAC feature to define policies in a declarative fashion by using a simple CSV-based format. You can define the policies by using the {product-short} web interface or REST API instead of editing the CSV directly.
-RBAC on {product-short} is built on top of the Permissions framework, which defines RBAC policies in code.
-Rather than defining policies in code,
-the {product-short} RBAC feature allows you
-to define policies in a declarative fashion using a simple CSV based format.
-You can define the policies by using {product-short} web interface or REST API, rather than editing the CSV directly.
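+
+The following is a minimal sketch of enabling the RBAC feature in an `app-config.yaml` file. The administrator user name and the CSV file path are illustrative; replace them with your own values.
+
+[source,yaml]
+----
+permission:
+  enabled: true                                   # turn on the permissions framework
+  rbac:
+    admin:
+      users:
+        - name: user:default/admin-user           # hypothetical RBAC administrator
+    policies-csv-file: /opt/app-root/src/rbac-policy.csv   # optional CSV-based policy file
+----
+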
+An administrator can define authorizations in {product-short} by taking the following steps:
-To define authorizations in {product-short}:
+. Enable the RBAC feature and give authorized users access to the feature.
-. The {product-short} administrator enables and gives access to the RBAC feature.
-
-. You define your roles and policies by combining the following methods:
+. Define roles and policies by combining the following methods:
* The {product-short} policy administrator uses the {product-short} web interface or REST API.
* The {product-short} administrator edits the main {product-short} configuration file.
@@ -43,6 +35,7 @@ include::assembly-managing-authorizations-by-using-external-files.adoc[leveloffs
include::assembly-configuring-guest-access-with-rbac-ui.adoc[leveloffset=+1]
+include::assembly-delegating-rbac-access-rhdh.adoc[leveloffset=+1]
include::modules/authorization/ref-rbac-permission-policies.adoc[leveloffset=+1]
@@ -57,4 +50,3 @@ include::modules/authorization/con-user-stats-rhdh.adoc[leveloffset=+1]
include::modules/authorization/proc-download-user-stats-rhdh.adoc[leveloffset=+2]
-
diff --git a/assemblies/assembly-configuring-default-secret-pvc-mounts.adoc b/assemblies/assembly-configuring-default-secret-pvc-mounts.adoc
new file mode 100644
index 0000000000..af8710113c
--- /dev/null
+++ b/assemblies/assembly-configuring-default-secret-pvc-mounts.adoc
@@ -0,0 +1,9 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="assembly-configuring-default-secret-pvc-mounts_{context}"]
+= Configuring default mounts for Secrets and PVCs
+
+You can configure Persistent Volume Claim (PVC) and Secret mounts in your {product} deployment. Use annotations to define the custom mount paths and specify the containers to mount them to.
+
+include::modules/configuring-external-databases/proc-configuring-mount-paths.adoc[leveloffset=+1]
+
+include::modules/configuring-external-databases/proc-mounting-to-specific-containers.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/assemblies/assembly-configuring-high-availability.adoc b/assemblies/assembly-configuring-high-availability.adoc
index 1e1340f112..0326720f35 100644
--- a/assemblies/assembly-configuring-high-availability.adoc
+++ b/assemblies/assembly-configuring-high-availability.adoc
@@ -3,11 +3,19 @@
[id="{context}"]
= Configuring high availability in {product}
-Previously, {product} supports a single-instance application. With this configuration, if the instance fails due to software crashes, hardware issues, or other unexpected disruptions, the entire {product} service becomes unavailable, preventing the development workflows or access to the resources. With high availability, you receive a failover mechanism that ensures the service is available even if one or more components fail. By increasing the number of replicas, you introduce redundancy to help increase higher productivity and minimize disruption.
-As an administrator, you can configure high availability in {product}. Once you set the high availability option in {product-short}, the {ocp-brand-name} built-in Load Balancer manages the ingress traffic and distributes the load to each pod. The {product-very-short} backend also manages concurrent requests or conflicts on the same resource.
+High availability (HA) is a system design approach that ensures a service remains continuously accessible, even during failures of individual components, by eliminating single points of failure. It introduces redundancy and failover mechanisms to minimize downtime and maintain operational continuity.
-You can configure high availability in {product-short} by scaling your `replicas` to a number greater than 1 in your configuration file. The configuration file that you use depends on the method that you used to install your {product-short} instance. If you used the Operator to install your {product-short} instance, configure the replica values in your `{product-custom-resource-type}` custom resource. If you used the Helm chart to install your {product-short} instance, configure the replica values in your Helm chart.
+{product} supports HA deployments on {ocp-brand-name} and {aks-name}. The HA deployments enable more resilient and reliable service availability across supported environments.
+
+In a single-instance deployment, a failure, whether due to a software crash, hardware issue, or other unexpected disruption, makes the entire service unavailable, interrupting development workflows and access to key resources.
+
+With HA enabled, you can scale the number of backend replicas to introduce redundancy. This setup ensures that if one pod or component fails, others continue to serve requests without disruption. The built-in load balancer manages ingress traffic and distributes the load across the available pods. Meanwhile, the {product-very-short} backend manages concurrent requests and resolves resource-level conflicts effectively.
+
+As an administrator, you can configure high availability by adjusting replica values in your configuration file, as shown in the example after the following list:
+
+* If you installed using the Operator, configure the replica values in your `{product-custom-resource-type}` custom resource.
+* If you used the Helm chart, set the replica values in the Helm configuration.
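+
+The following is a minimal sketch of the Operator-based approach. The instance name is illustrative, and you should verify the `apiVersion` and the `deployment.patch` fields against the custom resource definition of your installed Operator version. The Helm-based equivalent typically sets the same value under `upstream.backstage.replicas` in your Helm configuration.
+
+[source,yaml]
+----
+apiVersion: rhdh.redhat.com/v1alpha3   # verify against your installed Operator version
+kind: Backstage
+metadata:
+  name: my-rhdh                        # hypothetical instance name
+spec:
+  deployment:
+    patch:
+      spec:
+        replicas: 2                    # scale beyond 1 to introduce redundancy
+----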
include::modules/configuring-high-availability/proc-configuring-high-availability-in-rhdh-operator-deployment.adoc[leveloffset=+1]
diff --git a/assemblies/assembly-configuring-templates.adoc b/assemblies/assembly-configuring-templates.adoc
index 29d502b62e..5d69a69b47 100644
--- a/assemblies/assembly-configuring-templates.adoc
+++ b/assemblies/assembly-configuring-templates.adoc
@@ -1,23 +1,25 @@
:_mod-docs-content-type: ASSEMBLY
:context: configuring-templates
[id="{context}"]
-= Configuring templates
+= About Software Templates
-Configure templates to create software components, and publish these components to different locations, such as the {product} software catalog, or Git repositories.
+Software Templates in {product} provide a streamlined way to create software components and publish them to different version control repositories like Git. Platform engineers create and maintain Software Templates in {product}.
-A template is a form composed of different UI fields that is defined in a YAML file. Templates include _actions_, which are steps that are executed in sequential order and can be executed conditionally.
+You can configure Software Templates to create software components and publish these components to Git repositories. After the components are published to Git repositories, you can register them in the Software Catalog.
+
+A template is a form composed of different UI fields that is defined in a YAML file. Software Templates include _actions_, which are steps that are executed in sequential order and can be executed conditionally.
+
+* See link:https://developers.redhat.com/articles/2025/03/17/10-tips-better-backstage-software-templates#[10 tips for better Backstage Software Templates].
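+
+The following is a minimal sketch of a template definition, showing the typical structure of UI parameters followed by sequential actions. The template name, parameter, and repository URL are illustrative, and the `fetch:template` and `publish:github` steps assume those scaffolder actions are available in your instance.
+
+[source,yaml]
+----
+apiVersion: scaffolder.backstage.io/v1beta3
+kind: Template
+metadata:
+  name: example-nodejs-template          # hypothetical template name
+  title: Example Node.js Service
+spec:
+  owner: group:default/platform-team     # hypothetical owning group
+  type: service
+  parameters:
+    - title: Provide information
+      required: [name]
+      properties:
+        name:
+          title: Name
+          type: string
+  steps:
+    - id: fetch
+      name: Fetch skeleton
+      action: fetch:template
+      input:
+        url: ./skeleton                   # skeleton content stored next to the template
+        values:
+          name: ${{ parameters.name }}
+    - id: publish
+      name: Publish to Git
+      action: publish:github
+      input:
+        repoUrl: github.com?owner=my-org&repo=${{ parameters.name }}   # hypothetical target
+----
+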
include::modules/customizing-templates/proc-creating-templates.adoc[leveloffset=+1]
include::modules/customizing-templates/ref-creating-templates.adoc[leveloffset=+1]
+include::modules/customizing-templates/proc-creating-a-new-software-component-using-templates.adoc[leveloffset=+1]
+include::modules/customizing-templates/proc-searching-and-filtering-software-templates.adoc[leveloffset=+1]
+include::modules/customizing-templates/proc-adding-templates.adoc[leveloffset=+1]
[role="_additional-resources"]
.Additional resources
+* link:{authentication-book-url}#assembly-auth-provider-github[Enabling the GitHub authentication provider]
* link:https://backstage.io/docs/features/software-templates/writing-templates[Backstage documentation - Writing Templates]
* link:https://backstage.io/docs/features/software-templates/builtin-actions[Backstage documentation - Builtin actions]
* link:https://backstage.io/docs/features/software-templates/writing-custom-actions[Backstage documentation - Writing Custom Actions]
-
-include::modules/customizing-templates/proc-adding-templates.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-* link:{authentication-book-url}#assembly-auth-provider-github[Enabling the GitHub authentication provider]
diff --git a/assemblies/assembly-configuring-the-global-header.adoc b/assemblies/assembly-configuring-the-global-header.adoc
index bc7c14a2bd..93badc6698 100644
--- a/assemblies/assembly-configuring-the-global-header.adoc
+++ b/assemblies/assembly-configuring-the-global-header.adoc
@@ -6,7 +6,7 @@
As an administrator, you can configure the {product} global header to create a consistent and flexible navigation bar across your {product-short} instance.
By default, the {product-short} global header includes the following components:
-* *Create* button provides quick access to a variety of templates, enabling users to efficiently set up services, backend and front-end plugins within {product-short}
+* *Self-service* button provides quick access to a variety of templates, enabling users to efficiently set up services, backend and front-end plugins within {product-short}
* *Support* button that can link an internal or external support page
* *Notifications* button displays alerts and updates from plugins and external services
* *Search* input field allows users to find services, components, documentation, and other resources within {product-short}
diff --git a/assemblies/assembly-customizing-segment-source.adoc b/assemblies/assembly-customizing-segment-source.adoc
new file mode 100644
index 0000000000..010cb9bca9
--- /dev/null
+++ b/assemblies/assembly-customizing-segment-source.adoc
@@ -0,0 +1,17 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="customizing-segment-source_{context}"]
+= Customizing Segment source
+
+The `analytics-provider-segment` plugin sends the collected web analytics data to {company-name} by default. However, you can configure a new Segment source that receives web analytics data based on your needs. For configuration, you need a unique Segment write key that points to the Segment source.
+
+[NOTE]
+====
+Create your own web analytics data collection notice for your application users.
+====
+
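+The following is a minimal sketch of how the Segment write key is typically wired into the `app-config.yaml` file through an environment variable. The variable name `SEGMENT_WRITE_KEY` is an assumption; use whichever name you reference in your Secret or deployment configuration.
+
+[source,yaml]
+----
+app:
+  analytics:
+    segment:
+      writeKey: ${SEGMENT_WRITE_KEY}   # points to your own Segment source
+      maskIP: true                     # keep IP addresses anonymized
+----
+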
+include::modules/analytics/proc-customizing-segment-source-using-the-operator.adoc[leveloffset=+1]
+
+include::modules/analytics/proc-customizing-segment-source-using-helm-the-helm-chart.adoc[leveloffset=+1]
+
+.Additional resources
+* To learn how to collect and analyze the same set of data, see link:{telemetry-data-collection-book-url}[{telemetry-data-collection-book-title}].
\ No newline at end of file
diff --git a/assemblies/assembly-customizing-the-appearance.adoc b/assemblies/assembly-customizing-the-appearance.adoc
index dc1be2a9fd..041dd7057f 100644
--- a/assemblies/assembly-customizing-the-appearance.adoc
+++ b/assemblies/assembly-customizing-the-appearance.adoc
@@ -25,7 +25,18 @@ include::modules/customizing-the-appearance/proc-customize-rhdh-theme-mode.adoc[
include::modules/customizing-the-appearance/proc-customize-rhdh-branding-logo.adoc[leveloffset=+1]
-include::modules/customizing-the-appearance/proc-customize-rhdh-sidebar-menuitems.adoc[leveloffset=+1]
+include::modules/customizing-the-appearance/con-customize-rhdh-sidebar-menuitems.adoc[leveloffset=+1]
+
+include::modules/customizing-the-appearance/proc-customize-rhdh-sidebar-menuitems.adoc[leveloffset=+2]
+
+include::modules/customizing-the-appearance/proc-configuring-dynamic-plugin-menuitem.adoc[leveloffset=+2]
+
+include::modules/customizing-the-appearance/proc-modifying-or-adding-rhdh-custom-menuitem.adoc[leveloffset=+2]
+
+include::modules/customizing-the-appearance/proc-customizing-entity-tab-titles.adoc[leveloffset=+1]
+
+
+include::modules/customizing-the-appearance/proc-customizing-entity-detail-tab-layout.adoc[leveloffset=+1]
include::modules/customizing-the-appearance/proc-customize-rhdh-palette.adoc[leveloffset=+1]
@@ -45,4 +56,3 @@ include::modules/customizing-the-appearance/ref-customize-rhdh-default-backstage
include::modules/customizing-the-appearance/proc-loading-custom-theme-using-dynamic-plugin.adoc[leveloffset=+1]
include::modules/customizing-the-appearance/ref-customize-rhdh-custom-components.adoc[leveloffset=+1]
-
diff --git a/assemblies/assembly-customizing-the-learning-paths.adoc b/assemblies/assembly-customizing-the-learning-paths.adoc
new file mode 100644
index 0000000000..4693974b62
--- /dev/null
+++ b/assemblies/assembly-customizing-the-learning-paths.adoc
@@ -0,0 +1,21 @@
+[id='proc-customize-rhdh-learning-paths_{context}']
+= Customizing the Learning Paths in {product}
+
+In {product}, you can configure Learning Paths by hosting the required data externally and using the built-in proxy to deliver this data instead of the default data.
+
+You can provide Learning Paths data from the following sources:
+
+* A JSON file hosted on a web server, such as GitHub or GitLab.
+* A dedicated service that provides the Learning Paths data in JSON format using an API.
+
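+The following is a minimal sketch of a proxy entry in the `{my-app-config-file}` file that serves Learning Paths data from an externally hosted JSON file. The endpoint path, rewrite rule, and target URL are assumptions; use the proxy path expected by your Learning Paths configuration and the location of your own data.
+
+[source,yaml]
+----
+proxy:
+  endpoints:
+    '/developer-hub/learning-paths':                                    # assumed proxy endpoint path
+      target: https://raw.githubusercontent.com/my-org/my-data/main    # hypothetical host
+      pathRewrite:
+        '^/api/proxy/developer-hub/learning-paths': '/learning-paths.json'
+      changeOrigin: true
+      secure: true
+----
+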
+include::modules/customizing-the-learning-paths/con-about-learning-paths.adoc[leveloffset=+1]
+
+
+include::modules/customizing-the-learning-paths/proc-customizing-the-learning-paths-by-using-hosted-json-files.adoc[leveloffset=+1]
+
+
+include::modules/customizing-the-learning-paths/proc-customizing-the-learning-paths-by-using-a-dedicated-service.adoc[leveloffset=+1]
+
+
+include::modules/customizing-the-learning-paths/proc-starting-and-completing-lessions-in-learning-paths.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/assemblies/assembly-customizing-the-tech-radar-page.adoc b/assemblies/assembly-customizing-the-tech-radar-page.adoc
new file mode 100644
index 0000000000..0ec1a64d5f
--- /dev/null
+++ b/assemblies/assembly-customizing-the-tech-radar-page.adoc
@@ -0,0 +1,22 @@
+[id='proc-customizing-the-tech-radar-page_{context}']
+= Customizing the Tech Radar page in {product}
+
+In {product}, the Tech Radar page is provided by the `tech-radar` dynamic plugin, which is disabled by default. For information about enabling dynamic plugins in {product}, see link:{configuring-dynamic-plugins-book-url}[{configuring-dynamic-plugins-book-title}].
+
+In {product}, you can configure the Tech Radar page by passing the data into the `{my-app-config-file}` file as a proxy. The base Tech Radar URL must include the `/developer-hub/tech-radar` proxy.
+
+[NOTE]
+====
+Due to the use of overlapping `pathRewrites` for both the `tech-radar` and `homepage` quick access proxies, you must create the `tech-radar` configuration (`^/api/proxy/developer-hub/tech-radar`) before you create the `homepage` configuration (`^/api/proxy/developer-hub`).
+
+For more information about customizing the Home page in {product}, see xref:customizing-the-home-page[Customizing the Home page in {product}].
+====
+
+You can provide data to the Tech Radar page from the following sources:
+
+* JSON files hosted on GitHub or GitLab.
+* A dedicated service that provides the Tech Radar data in JSON format using an API.
+
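+The following is a minimal sketch of the proxy entry described above, combining the `tech-radar` and `homepage` path rewrites in the required order. The target URL and JSON file names are assumptions; replace them with the location of your own data.
+
+[source,yaml]
+----
+proxy:
+  endpoints:
+    '/developer-hub':
+      target: https://raw.githubusercontent.com/my-org/my-data/main    # hypothetical host
+      pathRewrite:
+        '^/api/proxy/developer-hub/tech-radar': '/tech-radar.json'     # define before the homepage rewrite
+        '^/api/proxy/developer-hub': '/homepage.json'
+      changeOrigin: true
+      secure: true
+----
+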
+include::modules/customizing-the-tech-radar-page/proc-customizing-the-tech-radar-page-by-using-a-json-file.adoc[leveloffset=+1]
+
+include::modules/customizing-the-tech-radar-page/proc-customizing-the-tech-radar-page-by-using-a-customization-service.adoc[leveloffset=+1]
diff --git a/assemblies/assembly-delegating-rbac-access-rhdh.adoc b/assemblies/assembly-delegating-rbac-access-rhdh.adoc
new file mode 100644
index 0000000000..4c2ac83368
--- /dev/null
+++ b/assemblies/assembly-delegating-rbac-access-rhdh.adoc
@@ -0,0 +1,25 @@
+:_mod-docs-content-type: ASSEMBLY
+
+[id="assembly-delegating-rbac-access-rhdh_{context}"]
+= Delegating role-based access controls (RBAC) access in {product}
+
+An enterprise customer requires the ability to delegate role-based access control (RBAC) responsibilities to other individuals in the organization. In this scenario, you, as the administrator, can provide access to the RBAC plugin specifically to designated users, such as team leads. Each team lead is then able to manage permissions exclusively for users within their respective team or department, without visibility into or control over permissions outside their assigned scope. This approach allows team leads to manage access and permissions for their own teams independently, while administrators maintain global oversight.
+
+In {product}, you can delegate RBAC access by using the multitenancy feature of the RBAC plugin, specifically the `IS_OWNER` conditional rule. You can use either the web UI or the RBAC backend API, depending on your preferred workflow and level of automation:
+
+* Use the web UI to create roles, assign users or groups, define permissions, and apply ownership conditions through an intuitive interface.
+
+* Use the API for a more flexible and automatable approach, where you can programmatically manage roles, permissions, and ownership conditions using authenticated curl requests.
+
+By delegating RBAC access through either method, you can expect the following outcomes:
+
+* Team leads can manage RBAC settings for their teams independently.
+* Visibility of other users' or teams' permissions is restricted.
+* Administrators retain overarching control while delegating team-specific access.
+
+.Prerequisites
+* Your {product-very-short} instance is running with the RBAC plugin installed and configured.
+* You have administrative access to {product-very-short}.
+
+include::modules/authorization/proc-delegating-rbac-access-webui.adoc[leveloffset=+1]
+include::modules/authorization/proc-delegating-rbac-access-api.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/assemblies/assembly-disabling-telemetry-data-collection.adoc b/assemblies/assembly-disabling-telemetry-data-collection.adoc
new file mode 100644
index 0000000000..421d49db5b
--- /dev/null
+++ b/assemblies/assembly-disabling-telemetry-data-collection.adoc
@@ -0,0 +1,11 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="disabling-telemetry-data-collection_{context}"]
+= Disabling telemetry data collection in {product-very-short}
+
+To disable telemetry data collection, you must disable the `analytics-provider-segment` plugin by using either the Helm chart or the {product} Operator configuration.
+
+As an administrator, you can disable the telemetry data collection feature based on your needs. For example, in an air-gapped environment, you can disable this feature to avoid needless outbound requests affecting the responsiveness of the {product-very-short} application. For more details, see the link:{telemetry-data-collection-book-url}#proc-disabling-telemetry-using-operator_title-telemetry[Disabling telemetry data collection in {product-very-short}] section.
+
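+The following is a minimal sketch of the dynamic plugin entry that disables the plugin in a `dynamic-plugins.yaml` or Helm values file. The package path shown is illustrative; use the path of the `analytics-provider-segment` package as listed in your installation's default dynamic plugin configuration.
+
+[source,yaml]
+----
+plugins:
+  - package: ./dynamic-plugins/dist/backstage-community-plugin-analytics-provider-segment   # illustrative path
+    disabled: true    # stops the plugin from sending telemetry data
+----
+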
+include::modules/analytics/proc-disabling-telemetry-using-operator.adoc[leveloffset=+1]
+
+include::modules/analytics/proc-disabling-telemetry-using-helm.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/modules/observe/ref-enabling-telemetry.adoc b/assemblies/assembly-enabling-telemetry-data-collection.adoc
similarity index 64%
rename from modules/observe/ref-enabling-telemetry.adoc
rename to assemblies/assembly-enabling-telemetry-data-collection.adoc
index 9df4622324..62fa2dc9dd 100644
--- a/modules/observe/ref-enabling-telemetry.adoc
+++ b/assemblies/assembly-enabling-telemetry-data-collection.adoc
@@ -1,5 +1,9 @@
+:_mod-docs-content-type: ASSEMBLY
[id="enabling-telemetry-data-collection_{context}"]
= Enabling telemetry data collection in {product-very-short}
The telemetry data collection feature is enabled by default. However, if you have disabled the feature and want to re-enable it, you must enable the `analytics-provider-segment` plugin either by using the Helm Chart or the {product} Operator configuration.
+include::modules/analytics/proc-enabling-telemetry-using-operator.adoc[leveloffset=+1]
+
+include::modules/analytics/proc-enabling-telemetry-using-helm.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/assemblies/assembly-install-rhdh-airgapped-environment-k8s-helm.adoc b/assemblies/assembly-install-rhdh-airgapped-environment-k8s-helm.adoc
new file mode 100644
index 0000000000..f6e0087a46
--- /dev/null
+++ b/assemblies/assembly-install-rhdh-airgapped-environment-k8s-helm.adoc
@@ -0,0 +1,19 @@
+[id="assembly-install-rhdh-airgapped-environment-k8s-helm_{context}"]
+= Installing {product} on a supported Kubernetes platform in an air-gapped environment with the Helm chart
+
+If you are using a supported Kubernetes platform in a fully disconnected or partially disconnected environment, you can install {product} by using the Helm chart. Supported Kubernetes platforms include the following:
+
+* {aks-brand-name}
+* {eks-brand-name}
+* {gke-brand-name}
+
+[role="_additional-resources"]
+.Additional resources
+* link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.5/html/installing_red_hat_developer_hub_on_microsoft_azure_kubernetes_service/proc-rhdh-deploy-aks-helm_title-install-rhdh-aks[Deploying {product-short} on {aks-short} with the Helm chart].
+* link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.5/html/installing_red_hat_developer_hub_on_amazon_elastic_kubernetes_service/proc-rhdh-deploy-eks-helm_title-install-rhdh-eksp[Installing {product-short} on {eks-short} with the Helm chart].
+* link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.5/html/installing_red_hat_developer_hub_on_google_kubernetes_engine/proc-rhdh-deploy-gke-helm_title-install-rhdh-gke[Installing {product-short} on {gke-short} with the Helm chart].
+
+include::modules/installation/proc-install-rhdh-airgapped-full-k8s-helm.adoc[leveloffset=+1]
+
+include::modules/installation/proc-install-rhdh-airgapped-partial-k8s-helm.adoc[leveloffset=+1]
+
diff --git a/assemblies/assembly-install-rhdh-eks-operator.adoc b/assemblies/assembly-install-rhdh-eks-operator.adoc
new file mode 100644
index 0000000000..457e182e71
--- /dev/null
+++ b/assemblies/assembly-install-rhdh-eks-operator.adoc
@@ -0,0 +1,10 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="assembly-install-rhdh-eks-operator"]
+= Installing {product-short} on {eks-short} with the Operator
+
+The {product} Operator installation requires the Operator Lifecycle Manager (OLM) framework.
+
+.Additional resources
+* For information about the OLM, see link:https://olm.operatorframework.io/docs/[Operator Lifecycle Manager (OLM)] documentation.
+
+include::modules/installation/proc-rhdh-deploy-eks-operator.adoc[leveloffset=+1]
diff --git a/assemblies/assembly-logging-with-amazon-cloudwatch.adoc b/assemblies/assembly-logging-with-amazon-cloudwatch.adoc
new file mode 100644
index 0000000000..3d3c7b9057
--- /dev/null
+++ b/assemblies/assembly-logging-with-amazon-cloudwatch.adoc
@@ -0,0 +1,16 @@
+[id="assembly-logging-with-amazon-cloudwatch_{context}"]
+= Logging with Amazon CloudWatch
+
+Logging within {product} relies on the link:https://github.com/winstonjs/winston[Winston library].
+The default logging level is `info`.
+To have more detailed logs, set the `LOG_LEVEL` environment variable to `debug` in your {product} instance.
+
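+For example, the following is a minimal sketch of setting the environment variable in a `Backstage` custom resource when you install with the Operator. The field names follow the Operator's `extraEnvs` mechanism; verify them against the custom resource schema of your installed Operator version.
+
+[source,yaml]
+----
+spec:
+  application:
+    extraEnvs:
+      envs:
+        - name: LOG_LEVEL
+          value: debug     # increase verbosity from the default "info"
+----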
+
+include::modules/observe/proc-configuring-the-application-log-level-for-logging-with-amazon-cloudwatch-logs-by-using-the-operator.adoc[leveloffset=+1]
+
+
+include::modules/observe/proc-configuring-the-application-log-level-for-logging-with-amazon-cloudwatch-logs-by-using-the-helm-chart.adoc[leveloffset=+1]
+
+
+include::modules/observe/proc-retrieving-logs-from-amazon-cloudwatch.adoc[leveloffset=+1]
+
diff --git a/assemblies/assembly-managing-labels-annotations-topology.adoc b/assemblies/assembly-managing-labels-annotations-topology.adoc
new file mode 100644
index 0000000000..ca5d269af6
--- /dev/null
+++ b/assemblies/assembly-managing-labels-annotations-topology.adoc
@@ -0,0 +1,20 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="assembly-managing-labels-annotations-topology"]
+= Managing labels and annotations for Topology plugins
+:context: assembly-managing-labels-annotations-topology
+
+include::modules/dynamic-plugins/proc-linking-to-source-code-editor-or-source.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-entity-annotation-or-label.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-namespace-annotation.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-label-selector-query-annotation.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-icon-displayed-in-the-node.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-app-grouping.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-node-connector.adoc[leveloffset=+1]
+
+For more information about the labels and annotations, see _Guidelines for labels and annotations for OpenShift applications_.
\ No newline at end of file
diff --git a/assemblies/assembly-monitoring-and-logging-with-aws.adoc b/assemblies/assembly-monitoring-and-logging-with-aws.adoc
new file mode 100644
index 0000000000..5e90e14d8f
--- /dev/null
+++ b/assemblies/assembly-monitoring-and-logging-with-aws.adoc
@@ -0,0 +1,11 @@
+[id="assembly-monitoring-and-logging-with-aws_{context}"]
+= Monitoring and logging {product} on {aws-brand-name} ({aws-short})
+
+You can configure {product} to use Amazon Prometheus for comprehensive monitoring and Amazon CloudWatch for logging.
+This is convenient when hosting {product-short} on {aws-brand-name} ({aws-short}) infrastructure.
+
+include::assembly-monitoring-with-amazon-prometheus.adoc[leveloffset=+1]
+
+
+include::assembly-logging-with-amazon-cloudwatch.adoc[leveloffset=+1]
+
diff --git a/assemblies/assembly-monitoring-with-amazon-prometheus.adoc b/assemblies/assembly-monitoring-with-amazon-prometheus.adoc
new file mode 100644
index 0000000000..5f31e876df
--- /dev/null
+++ b/assemblies/assembly-monitoring-with-amazon-prometheus.adoc
@@ -0,0 +1,22 @@
+[id="assembly-monitoring-with-amazon-prometheus_{context}"]
+= Monitoring with Amazon Prometheus
+
+You can configure {product} to use Amazon Prometheus for comprehensive monitoring.
+Amazon Prometheus extracts data from pods that have specific pod annotations.
+
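+The following is a minimal sketch of the pod annotations that Prometheus typically looks for. The metrics port is an assumption; use the port on which your {product-short} backend exposes metrics, and set these annotations through the Operator or Helm chart configuration described in the procedures below.
+
+[source,yaml]
+----
+metadata:
+  annotations:
+    prometheus.io/scrape: "true"     # mark the pod for scraping
+    prometheus.io/path: /metrics     # path serving the metrics
+    prometheus.io/port: "9464"       # assumed metrics port
+----
+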
+== Prerequisites
+
+* You link:https://docs.aws.amazon.com/eks/latest/userguide/prometheus.html[configured Prometheus for your {eks-name} ({eks-short}) clusters].
+* You link:https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-create-workspace.html[created an Amazon managed service for the Prometheus workspace].
+* You link:https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-ingest-metrics.html[configured Prometheus to import the {product-short} metrics].
+* You ingested Prometheus metrics into the created workspace.
+
+
+include::modules/observe/proc-configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-operator.adoc[leveloffset=+1]
+
+
+include::modules/observe/proc-configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-helm-chart.adoc[leveloffset=+1]
+
+
+
+
diff --git a/assemblies/assembly-release-notes-fixed-security-issues.adoc b/assemblies/assembly-release-notes-fixed-security-issues.adoc
deleted file mode 100644
index b68de1d889..0000000000
--- a/assemblies/assembly-release-notes-fixed-security-issues.adoc
+++ /dev/null
@@ -1,12 +0,0 @@
-:_content-type: ASSEMBLY
-[id="fixed-security-issues"]
-= Fixed security issues
-
-This section lists security issues fixed in {product} {product-version}.
-
-== {product} {product-bundle-version}
-
-include::modules/release-notes/snip-fixed-security-issues-in-product-1.5.0.adoc[leveloffset=+2]
-
-include::modules/release-notes/snip-fixed-security-issues-in-rpm-1.5.0.adoc[leveloffset=+2]
-
diff --git a/assemblies/assembly-rhdh-telemetry.adoc b/assemblies/assembly-rhdh-telemetry.adoc
deleted file mode 100644
index 00ee46fdda..0000000000
--- a/assemblies/assembly-rhdh-telemetry.adoc
+++ /dev/null
@@ -1,41 +0,0 @@
-[id="assembly-rhdh-telemetry"]
-= Telemetry data collection
-
-The telemetry data collection feature helps in collecting and analyzing the telemetry data to improve your experience with {product}. This feature is enabled by default.
-
-[IMPORTANT]
-====
-As an administrator, you can disable the telemetry data collection feature based on your needs. For example, in an air-gapped environment, you can disable this feature to avoid needless outbound requests affecting the responsiveness of the {product-very-short} application. For more details, see the link:{telemetry-data-collection-book-url}#proc-disabling-telemetry-using-operator_title-telemetry[Disabling telemetry data collection in {product-very-short}] section.
-====
-
-{company-name} collects and analyzes the following data:
-
-* Events of page visits and clicks on links or buttons.
-* System-related information, for example, locale, timezone, user agent including browser and OS details.
-* Page-related information, for example, title, category, extension name, URL, path, referrer, and search parameters.
-* Anonymized IP addresses, recorded as `0.0.0.0`.
-* Anonymized username hashes, which are unique identifiers used solely to identify the number of unique users of the {product-very-short} application.
-
-With {product-very-short}, you can customize the telemetry data collection feature and the telemetry Segment source configuration based on your needs.
-
-
-// disabling telemetry
-include::modules/observe/ref-disabling-telemetry.adoc[leveloffset=+1]
-
-include::modules/observe/proc-disabling-telemetry-using-operator.adoc[leveloffset=+2]
-
-include::modules/observe/proc-disabling-telemetry-using-helm.adoc[leveloffset=+2]
-
-// enabling telemetry
-include::modules/observe/ref-enabling-telemetry.adoc[leveloffset=+1]
-
-include::modules/observe/proc-enabling-telemetry-using-operator.adoc[leveloffset=+2]
-
-include::modules/observe/proc-enabling-telemetry-using-helm.adoc[leveloffset=+2]
-
-// customizing telemetry segment source
-include::modules/observe/ref-customizing-telemetry-segment.adoc[leveloffset=+1]
-
-include::modules/observe/proc-customizing-telemetry-segment-using-operator.adoc[leveloffset=+2]
-
-include::modules/observe/proc-customizing-telemetry-segment-using-helm.adoc[leveloffset=+2]
diff --git a/assemblies/assembly-techdocs-add-docs.adoc b/assemblies/assembly-techdocs-add-docs.adoc
new file mode 100644
index 0000000000..ed5a306e10
--- /dev/null
+++ b/assemblies/assembly-techdocs-add-docs.adoc
@@ -0,0 +1,8 @@
+:_mod-docs-content-type: ASSEMBLY
+:context: assembly-techdocs-add-docs
+[id="{context}"]
+= Adding documentation to TechDocs
+
+After an administrator configures the TechDocs plugin, a developer can add documentation to TechDocs by importing it from a remote repository. Any authorized user or group can access the documentation that is imported into the TechDocs plugin.
+
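+The following is a minimal sketch of what a repository typically needs so that TechDocs can import its documentation: a `backstage.io/techdocs-ref` annotation on the component's entity descriptor and an `mkdocs.yml` file at the referenced location. The component and site names are illustrative.
+
+[source,yaml]
+----
+# catalog-info.yaml (excerpt)
+metadata:
+  name: my-service                       # hypothetical component
+  annotations:
+    backstage.io/techdocs-ref: dir:.     # documentation lives in this repository
+---
+# mkdocs.yml
+site_name: My Service Documentation
+nav:
+  - Home: index.md
+plugins:
+  - techdocs-core
+----
+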
+include::modules/techdocs/proc-techdocs-add-docs-from-remote-repo.adoc[leveloffset=+1]
diff --git a/assemblies/assembly-techdocs-addons-installing.adoc b/assemblies/assembly-techdocs-addons-installing.adoc
index 6e20fd3b08..9acaa7ef95 100644
--- a/assemblies/assembly-techdocs-addons-installing.adoc
+++ b/assemblies/assembly-techdocs-addons-installing.adoc
@@ -3,7 +3,9 @@
[id="techdocs-addon-installing"]
= Installing and configuring a TechDocs add-on
-TechDocs add-ons are supported by {company-name} are imported by the `backstage-plugin-techdocs-module-addons-contrib` plugin package. The `` add-on is preinstalled in the TechDocs plugin and enabled by default. You can install external TechDocs add-ons by adding them to `backstage-plugin-techdocs-module-addons-contrib` package with either the {product} Operator or Helm chart. Additionally, you can import compatible third-party add-ons, including add-ons that you create yourself, with a third-party dynamic plugin.
+TechDocs add-ons supported by {company-name} are exported to the TechDocs plugin by the `backstage-plugin-techdocs-module-addons-contrib` plugin package, which is preinstalled on {product} and enabled by default. The `` add-on is part of the default configuration of this plugin package and comes ready to use in the TechDocs plugin.
+
+You can install other supported TechDocs add-ons by configuring the `backstage-plugin-techdocs-module-addons-contrib` plugin package in the {product} ConfigMap or Helm chart, depending on whether you use the Operator or Helm chart for installation. If you want to customize your TechDocs experience beyond the functions of the supported add-ons, you can install third-party add-ons on your TechDocs plugin, including add-ons that you create yourself.
include::modules/techdocs/proc-techdocs-addon-install-operator.adoc[]
diff --git a/assemblies/assembly-techdocs-addons.adoc b/assemblies/assembly-techdocs-addons.adoc
index 7903a09071..ea4f380c12 100644
--- a/assemblies/assembly-techdocs-addons.adoc
+++ b/assemblies/assembly-techdocs-addons.adoc
@@ -31,4 +31,4 @@ The following table describes the TechDocs add-ons that are available for {produ
//future release | Expand or collapse the subtitles in the TechDocs navigation menu and keep your preferred state between documentation sites.
|===
-You can use a dynamic plugin to import a TechDocs add-on. TechDocs add-ons are enabled at the plugin level, therefore, enabling a plugin enables all of the TechDocs add-ons in the specified plugin package. The preinstalled and external add-ons supported by {company} are imported by the `backstage-plugin-techdocs-module-addons-contrib` plugin package. Since the `disabled` status is set at the plugin level, the `disabled` status of the plugin package applies to all of the TechDocs add-ons imported by that package.
+The `backstage-plugin-techdocs-module-addons-contrib` plugin package exports both preinstalled and external add-ons supported by {company-name} to the TechDocs plugin. This plugin package is preinstalled on {product} and is enabled by default. If the plugin package is disabled, all of the TechDocs add-ons exported by the package are also disabled.
diff --git a/assemblies/assembly-topology-plugin-configure.adoc b/assemblies/assembly-topology-plugin-configure.adoc
new file mode 100644
index 0000000000..ad1157b65d
--- /dev/null
+++ b/assemblies/assembly-topology-plugin-configure.adoc
@@ -0,0 +1,14 @@
+:_mod-docs-content-type: ASSEMBLY
+[id="assembly-topology-plugin-configure"]
+= Configuring the Topology plugin
+:context: assembly-topology-plugin-configure
+
+include::modules/dynamic-plugins/proc-viewing-openshift-routes.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-viewing-pod-logs.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-viewing-tekton-pipelineruns.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-viewing-virtual-machines.adoc[leveloffset=+1]
+
+include::modules/dynamic-plugins/proc-enabling-the-source-code-editor.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/assemblies/assembly-using-kubernetes-custom-actions.adoc b/assemblies/assembly-using-kubernetes-custom-actions.adoc
index f3610ed9bf..fad1f1d42c 100644
--- a/assemblies/assembly-using-kubernetes-custom-actions.adoc
+++ b/assemblies/assembly-using-kubernetes-custom-actions.adoc
@@ -1,8 +1,6 @@
[id='con-Kubernetes-custom-actions_{context}']
= Kubernetes custom actions in {product}
-include::{docdir}/artifacts/snip-technology-preview.adoc[]
-
With Kubernetes custom actions, you can create and manage Kubernetes resources.
The Kubernetes custom actions plugin is preinstalled and disabled on a {product-short} instance by default. You can disable or enable the Kubernetes custom actions plugin, and change other parameters, by configuring the {product} Helm chart.
diff --git a/assemblies/assembly-using-techdocs.adoc b/assemblies/assembly-using-techdocs.adoc
new file mode 100644
index 0000000000..61990c879a
--- /dev/null
+++ b/assemblies/assembly-using-techdocs.adoc
@@ -0,0 +1,14 @@
+:_mod-docs-content-type: ASSEMBLY
+:context: assembly-using-techdocs
+[id="{context}"]
+= Using TechDocs
+
+The TechDocs plugin is installed and enabled on your {product} instance by default. After an administrator configures the TechDocs plugin, an authorized developer can use the TechDocs plugin to add, view, or manage documentation.
+
+include::assembly-techdocs-add-docs.adoc[leveloffset=+1]
+
+include::modules/techdocs/proc-techdocs-find-docs.adoc[leveloffset=+1]
+
+include::modules/techdocs/proc-techdocs-view-docs.adoc[leveloffset=+1]
+
+include::modules/techdocs/proc-techdocs-edit-docs.adoc[leveloffset=+1]
diff --git a/assemblies/dynamic-plugins/assembly-configuring-rhdh-plugins.adoc b/assemblies/dynamic-plugins/assembly-configuring-rhdh-plugins.adoc
index e4740b72ff..cc104b32ee 100644
--- a/assemblies/dynamic-plugins/assembly-configuring-rhdh-plugins.adoc
+++ b/assemblies/dynamic-plugins/assembly-configuring-rhdh-plugins.adoc
@@ -20,15 +20,16 @@ include::../../artifacts/rhdh-plugins-reference/nexus-repository-manager/nexus-r
include::../../artifacts/rhdh-plugins-reference/tekton/tekton-plugin-admin.adoc[leveloffset=+1]
// Topology
-== Installing and configuring the Topology plugin
-include::../modules/dynamic-plugins/proc-topology-install.adoc[leveloffset=+2]
-include::../modules/dynamic-plugins/proc-topology-configure.adoc[leveloffset=+2]
+include::../dynamic-plugins/assembly-install-topology-plugin.adoc[leveloffset=+1]
+// Bulk Importing
include::../assembly-bulk-importing-from-github.adoc[leveloffset=+1]
+// ServiceNow
include::../assembly-using-servicenow.adoc[leveloffset=+1]
+// Kubernetes Custom Actions
include::../assembly-using-kubernetes-custom-actions.adoc[leveloffset=+1]
// Overriding Core Backend Service Configuration
-include::../modules/dynamic-plugins/con-overriding-core-backend-services.adoc[leveloffset=+1]
+include::../modules/dynamic-plugins/proc-overriding-core-backend-services.adoc[leveloffset=+1]
diff --git a/assemblies/dynamic-plugins/assembly-extensions-plugins.adoc b/assemblies/dynamic-plugins/assembly-extensions-plugins.adoc
index 666cf29070..599533e84f 100644
--- a/assemblies/dynamic-plugins/assembly-extensions-plugins.adoc
+++ b/assemblies/dynamic-plugins/assembly-extensions-plugins.adoc
@@ -29,8 +29,11 @@ include::../modules/dynamic-plugins/proc-viewing-installed-plugins.adoc[leveloff
// Searching and filtering Extensions
include::../modules/dynamic-plugins/con-catalog-searching-and-filtering.adoc[leveloffset=+1]
-// Viewing a plugin
-include::../modules/dynamic-plugins/ref-catalog-plugin.adoc[leveloffset=+2]
+// Viewing a plugin - screenshots added post 1.5 GA, so this section is no longer necessary
+// include::../modules/dynamic-plugins/ref-catalog-plugin.adoc[leveloffset=+2]
+
+// Installing a plugin
+include::../modules/dynamic-plugins/proc-extensions-installing.adoc[leveloffset=+1]
// Disabling Extensions
include::../modules/dynamic-plugins/proc-extensions-disabling.adoc[leveloffset=+1]
diff --git a/assemblies/dynamic-plugins/assembly-install-topology-plugin.adoc b/assemblies/dynamic-plugins/assembly-install-topology-plugin.adoc
new file mode 100644
index 0000000000..7e38a9fcc7
--- /dev/null
+++ b/assemblies/dynamic-plugins/assembly-install-topology-plugin.adoc
@@ -0,0 +1,8 @@
+[id="install-topology-plugin_{context}"]
+= Installing the Topology plugin
+
+include::../modules/dynamic-plugins/proc-topology-install.adoc[leveloffset=+2]
+
+include::../assembly-topology-plugin-configure.adoc[leveloffset=+1]
+
+include::../assembly-managing-labels-annotations-topology.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc b/assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc
index 27b4366094..bf113f32a9 100644
--- a/assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc
+++ b/assemblies/dynamic-plugins/assembly-rhdh-installing-dynamic-plugins.adoc
@@ -33,8 +33,8 @@ include::../modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc[leveloffset
//====
// Community plugins
-[id="rhdh-community-plugins"]
-include::../modules/dynamic-plugins/ref-community-plugins.adoc[leveloffset=+4]
+// [id="rhdh-community-plugins"]
+// include::../modules/dynamic-plugins/ref-community-plugins.adoc[leveloffset=+4]
// Red Hat compatible plugins
[id="rhdh-compatible-plugins"]
diff --git a/assemblies/dynamic-plugins/assembly-using-the-dynamic-plugins-cache.adoc b/assemblies/dynamic-plugins/assembly-using-the-dynamic-plugins-cache.adoc
new file mode 100644
index 0000000000..d7f06b8155
--- /dev/null
+++ b/assemblies/dynamic-plugins/assembly-using-the-dynamic-plugins-cache.adoc
@@ -0,0 +1,7 @@
+[id="using-the-dynamic-plugins-cache_{context}"]
+= Using the dynamic plugins cache
+
+include::../modules/dynamic-plugins/con-dynamic-plugins-cache.adoc[leveloffset=+1]
+include::../modules/dynamic-plugins/proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-the-operator.adoc[leveloffset=+1]
+include::../modules/dynamic-plugins/proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-helm.adoc[leveloffset=+1]
+include::../modules/dynamic-plugins/ref-configuring-the-dynamic-plugins-cache.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/images/rhdh-plugins-reference/group1.jpg b/images/rhdh-plugins-reference/group1.jpg
deleted file mode 100644
index 4b9a277ac4..0000000000
Binary files a/images/rhdh-plugins-reference/group1.jpg and /dev/null differ
diff --git a/images/rhdh-plugins-reference/rhdh-extensions-tekton-card.png b/images/rhdh-plugins-reference/rhdh-extensions-tekton-card.png
new file mode 100644
index 0000000000..fa8af004d7
Binary files /dev/null and b/images/rhdh-plugins-reference/rhdh-extensions-tekton-card.png differ
diff --git a/images/rhdh-plugins-reference/rhdh-extensions-tekton-details.png b/images/rhdh-plugins-reference/rhdh-extensions-tekton-details.png
new file mode 100644
index 0000000000..c36183e4cf
Binary files /dev/null and b/images/rhdh-plugins-reference/rhdh-extensions-tekton-details.png differ
diff --git a/images/rhdh-plugins-reference/rhdh-extensions-tekton-editor-1.png b/images/rhdh-plugins-reference/rhdh-extensions-tekton-editor-1.png
new file mode 100644
index 0000000000..e69a00be1c
Binary files /dev/null and b/images/rhdh-plugins-reference/rhdh-extensions-tekton-editor-1.png differ
diff --git a/images/rhdh-plugins-reference/rhdh-extensions-tekton-editor-2.png b/images/rhdh-plugins-reference/rhdh-extensions-tekton-editor-2.png
new file mode 100644
index 0000000000..9b95d76493
Binary files /dev/null and b/images/rhdh-plugins-reference/rhdh-extensions-tekton-editor-2.png differ
diff --git a/images/rhdh-plugins-reference/user-list.jpg b/images/rhdh-plugins-reference/user-list.jpg
deleted file mode 100644
index 344fd51353..0000000000
Binary files a/images/rhdh-plugins-reference/user-list.jpg and /dev/null differ
diff --git a/images/rhdh-plugins-reference/user2.jpg b/images/rhdh-plugins-reference/user2.jpg
deleted file mode 100644
index 95d29b7666..0000000000
Binary files a/images/rhdh-plugins-reference/user2.jpg and /dev/null differ
diff --git a/images/rhdh-plugins-reference/users.jpg b/images/rhdh-plugins-reference/users.jpg
deleted file mode 100644
index 393f14bdd5..0000000000
Binary files a/images/rhdh-plugins-reference/users.jpg and /dev/null differ
diff --git a/images/rhdh/template-creation-not-successful.png b/images/rhdh/template-creation-not-successful.png
new file mode 100644
index 0000000000..6fc1a920ac
Binary files /dev/null and b/images/rhdh/template-creation-not-successful.png differ
diff --git a/images/rhdh/template-creation-successful.png b/images/rhdh/template-creation-successful.png
new file mode 100644
index 0000000000..37d21d19d8
Binary files /dev/null and b/images/rhdh/template-creation-successful.png differ
diff --git a/images/rhdh/template-editor.png b/images/rhdh/template-editor.png
index 43b7497cf9..c8077ef46d 100644
Binary files a/images/rhdh/template-editor.png and b/images/rhdh/template-editor.png differ
diff --git a/modules/about/con_understanding-internal-developer-platforms.adoc b/modules/about/con_understanding-internal-developer-platforms.adoc
new file mode 100644
index 0000000000..a3161ad6b9
--- /dev/null
+++ b/modules/about/con_understanding-internal-developer-platforms.adoc
@@ -0,0 +1,70 @@
+:_newdoc-version: 2.18.3
+:_template-generated: 2025-05-12
+
+:_mod-docs-content-type: CONCEPT
+
+[id="understanding-internal-developer-platforms_{context}"]
+= Understanding internal developer platforms
+
+An internal developer platform (IDP) is a curated set of tools and services that supports developer self-service. Instead of navigating multiple systems, developers use a unified interface to provision environments, deploy code, and access APIs.
+
+Why IDPs matter::
+
+IDPs address the challenges of modern software delivery by enabling self-service, enforcing standards, and improving the developer experience.
+
+For organizations::
+
+* *Scalability:* {product-very-short} enables consistent developer onboarding and application delivery across growing teams and environments.
+* *Security:* Role-based access control (RBAC) and integration with enterprise systems ensure access is managed securely and in line with compliance requirements.
+* *Operational efficiency:* By removing manual handoffs and centralizing key development workflows, {product-very-short} improves time to value and increases return on engineering investment.
+
+For platform engineers::
+
+* *Curated platforms:* Platform teams can design reusable templates and integrations aligned with organizational policies and developer needs.
+* *Central configuration:* Infrastructure and policies are defined as code and centrally managed, reducing drift and maintenance overhead.
+* *Governance at scale:* Policies and best practices are embedded into developer workflows using automation and templates, without adding friction to the process.
+
+For developers::
+
+* *Faster onboarding:* Developers can use learning paths, software templates, and the software catalog to deploy compliant services within minutes, without depending on other teams for setup.
+* *Reduced cognitive load:* Developers can find tools, documentation, and deployment environments in one place, eliminating the need to switch between systems or manage disconnected resources.
+* *Self-service workflows:* Developers can create applications or environments on demand, without raising tickets or waiting for approvals.
+* *Built-in standards:* Developers can use preconfigured templates that enforce secure, compliant workflows without requiring manual setup.
+* *Cross-team visibility:* Developers can discover shared service catalogs and documentation to improve reuse and reduce duplication.
+* *Higher productivity:* Developers can spend more time building features and less time configuring infrastructure or resolving toolchain inconsistencies.
+
+.Key features
+
+Centralized dashboard::
+Access development tools, CI/CD pipelines, APIs, monitoring tools, and documentation from a single interface. Integrate with systems like Git, {ocp-brand-name}, Kubernetes, and JIRA.
+
+Learning paths::
+Guide developers through structured tutorials and onboarding steps. Help teams upskill with internal and Red Hat training resources in one place.
+
+Plugins and integrations::
+Extend {product-very-short} with verified plugins that add new functionality without downtime. Dynamically integrate with supported tools such as Tekton for pipelines, GitOps for deployment automation, Nexus Repository for artifact storage, and JFrog Artifactory. {product-very-short} also supports connecting to {ocp-brand-name}, CI/CD systems, and security scanners through Red Hat-curated extensions.
+
+Role-Based Access Control (RBAC)::
+Manage user access with robust security permissions tailored to organizational needs.
+
+Software catalog::
+Search, view, and manage services, APIs, and libraries from a central inventory. Track ownership, metadata, and component health in one place.
+
+Software templates::
+Accelerate project setup using preconfigured templates for CI/CD, runtime, and security. Standardize implementation while enabling developer autonomy.
+
+Tech docs::
+Create, store, and view technical documentation alongside code. Make content searchable, consistently formatted, and accessible through the portal.
+
+Scalability::
+Support growing teams and applications while maintaining access to the same tools and services.
+
+[role="_additional-resources"]
+.Additional resources
+
+* link:https://docs.redhat.com/documentation/red_hat_developer_hub/{product-version}/html/customizing_red_hat_developer_hub/configuring-templates[Configuring templates]
+* link:https://docs.redhat.com/documentation/red_hat_developer_hub/{product-version}/html/techdocs_for_red_hat_developer_hub/index[TechDocs for {product}]
+* link:https://docs.redhat.com/documentation/red_hat_developer_hub/{product-version}/html-single/customizing_red_hat_developer_hub/index#proc-customize-rhdh-learning-paths_configuring-templates[Customizing the Learning Paths in {product}]
+* link:https://docs.redhat.com/documentation/red_hat_developer_hub/{product-version}/html/introduction_to_plugins/index[Introduction to plugins]
+* xref:integrations-in-rhdh_{context}[Integrations in {product}]
+* link:https://docs.redhat.com/documentation/red_hat_developer_hub/{product-version}/html/authorization_in_red_hat_developer_hub/index[Authorization in {product}]
diff --git a/modules/analytics/con-telemetry-data-collection-and-analysis.adoc b/modules/analytics/con-telemetry-data-collection-and-analysis.adoc
new file mode 100644
index 0000000000..c5a48f3954
--- /dev/null
+++ b/modules/analytics/con-telemetry-data-collection-and-analysis.adoc
@@ -0,0 +1,28 @@
+[id="telemetry-data-collection-and-analysis_{context}"]
+= Telemetry data collection and analysis
+
+The telemetry data collection feature collects and analyzes telemetry data to improve your experience with {product}. This feature is enabled by default.
+
+{company-name} collects and analyzes the following data:
+
+Web Analytics::
+Web Analytics uses the Segment tool to track user behavior and interactions with {product}.
+Specifically, it tracks the following:
+
+* Events of page visits and clicks on links or buttons.
+* System-related information, for example, locale, time zone, user agent including browser and operating system details.
+* Page-related information, for example, title, category, extension name, URL, path, referrer, and search parameters.
+* Anonymized IP addresses, recorded as `0.0.0.0`.
+* Anonymized username hashes, which are unique identifiers used solely to determine the number of unique users of the {product-very-short} application.
+
+System Observability::
+System Observability uses the OpenTelemetry tool to track the performance of {product-very-short}.
+Specifically, it tracks the following metrics:
+
+* Key system metrics such as CPU usage, memory usage, and other performance indicators.
+* Information about system components, such as the locale, time zone, and user agent (including details of the browser and operating system).
+* Traces and logs of system processes, which you can use to troubleshoot issues that impact the performance of {product-very-short}.
+
+With {product-very-short}, you can customize the _Web Analytics_ and _System Observability_ configuration based on your needs.
\ No newline at end of file
diff --git a/modules/observe/proc-customizing-telemetry-segment-using-helm.adoc b/modules/analytics/proc-customizing-segment-source-using-helm-the-helm-chart.adoc
similarity index 89%
rename from modules/observe/proc-customizing-telemetry-segment-using-helm.adoc
rename to modules/analytics/proc-customizing-segment-source-using-helm-the-helm-chart.adoc
index 3b61cbb981..08deb63aa0 100644
--- a/modules/observe/proc-customizing-telemetry-segment-using-helm.adoc
+++ b/modules/analytics/proc-customizing-segment-source-using-helm-the-helm-chart.adoc
@@ -1,7 +1,7 @@
-[id="proc-customizing-telemetry-segment-using-helm_{context}"]
-= Customizing telemetry Segment source using the Helm Chart
+[id="customizing-segment-source-using-helm-the-helm-chart_{context}"]
+= Customizing Segment source using the Helm Chart
-You can configure integration with your Segment source by using the Helm Chart.
+You can configure integration with your Segment source by using the {product} Helm Chart.
.Prerequisites
diff --git a/modules/observe/proc-customizing-telemetry-segment-using-operator.adoc b/modules/analytics/proc-customizing-segment-source-using-the-operator.adoc
similarity index 90%
rename from modules/observe/proc-customizing-telemetry-segment-using-operator.adoc
rename to modules/analytics/proc-customizing-segment-source-using-the-operator.adoc
index 0d08938f23..5a1f01c9b2 100644
--- a/modules/observe/proc-customizing-telemetry-segment-using-operator.adoc
+++ b/modules/analytics/proc-customizing-segment-source-using-the-operator.adoc
@@ -1,7 +1,7 @@
[id="proc-customizing-telemetry-segment-using-operator_{context}"]
-= Customizing telemetry Segment source using the Operator
+= Customizing Segment source using the Operator
-You can configure integration with your Segment source by using the Operator.
+You can configure integration with your Segment source by using the {product} Operator.
.Prerequisites
diff --git a/modules/observe/proc-disabling-telemetry-using-helm.adoc b/modules/analytics/proc-disabling-telemetry-using-helm.adoc
similarity index 100%
rename from modules/observe/proc-disabling-telemetry-using-helm.adoc
rename to modules/analytics/proc-disabling-telemetry-using-helm.adoc
diff --git a/modules/observe/proc-disabling-telemetry-using-operator.adoc b/modules/analytics/proc-disabling-telemetry-using-operator.adoc
similarity index 100%
rename from modules/observe/proc-disabling-telemetry-using-operator.adoc
rename to modules/analytics/proc-disabling-telemetry-using-operator.adoc
diff --git a/modules/observe/proc-enabling-telemetry-using-helm.adoc b/modules/analytics/proc-enabling-telemetry-using-helm.adoc
similarity index 100%
rename from modules/observe/proc-enabling-telemetry-using-helm.adoc
rename to modules/analytics/proc-enabling-telemetry-using-helm.adoc
diff --git a/modules/observe/proc-enabling-telemetry-using-operator.adoc b/modules/analytics/proc-enabling-telemetry-using-operator.adoc
similarity index 100%
rename from modules/observe/proc-enabling-telemetry-using-operator.adoc
rename to modules/analytics/proc-enabling-telemetry-using-operator.adoc
diff --git a/modules/authentication/proc-enabling-authentication-with-rhbk.adoc b/modules/authentication/proc-enabling-authentication-with-rhbk.adoc
index 7d03a1479d..9bbc1909a6 100644
--- a/modules/authentication/proc-enabling-authentication-with-rhbk.adoc
+++ b/modules/authentication/proc-enabling-authentication-with-rhbk.adoc
@@ -9,13 +9,13 @@ To authenticate users with {rhbk-brand-name} ({rhbk}), enable the OpenID Connect
* You have sufficient permissions in {rhsso} to create and manage a realm.
.Procedure
-. To allow {product-short} to authenticate with {rhbk}, complete the steps in {rhbk}, to link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/24.0/html/getting_started_guide/getting-started-zip-#getting-started-zip-create-a-realm[create a realm and a user] and link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/24.0/html/getting_started_guide/getting-started-zip-#getting-started-zip-secure-the-first-application[secure the first application]:
+. To allow {product-short} to authenticate with {rhbk}, complete the steps in {rhbk}, to link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/26.0/html/getting_started_guide/getting-started-zip-#getting-started-zip-create-a-realm[create a realm and a user] and link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/26.0/html/getting_started_guide/getting-started-zip-#getting-started-zip-secure-the-first-application[secure the first application]:
-.. Use an existing realm, or link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/24.0/html/getting_started_guide/getting-started-zip-#getting-started-zip-create-a-realm[create a realm], with a distinctive **Name** such as ____.
+.. Use an existing realm, or link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/26.0/html/getting_started_guide/getting-started-zip-#getting-started-zip-create-a-realm[create a realm], with a distinctive **Name** such as ____.
Save the value for the next step:
* **{rhbk} realm base URL**, such as: ____/realms/____.
-.. To register your {product-short} in {rhbk}, in the created realm, link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/24.0/html-single/getting_started_guide/index#getting-started-zip-secure-the-first-application[secure the first application], with:
+.. To register your {product-short} in {rhbk}, in the created realm, link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/26.0/html-single/getting_started_guide/index#getting-started-zip-secure-the-first-application[secure the first application], with:
... **Client ID**: A distinctive client ID, such as __<{product-very-short}>__.
... **Valid redirect URIs**: Set to the OIDC handler URL: `https://____/api/auth/oidc/handler/frame`.
... Navigate to the **Credentials** tab and copy the **Client secret**.
@@ -23,7 +23,7 @@ Save the value for the next step:
* **Client ID**
* **Client Secret**
-.. To prepare for the verification steps, in the same realm, get the credential information for an existing user or link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/24.0/html-single/getting_started_guide/index#getting-started-zip-create-a-user[create a user]. Save the user credential information for the verification steps.
+.. To prepare for the verification steps, in the same realm, get the credential information for an existing user or link:https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/26.0/html-single/getting_started_guide/index#getting-started-zip-create-a-user[create a user]. Save the user credential information for the verification steps.
. To add your {rhsso} credentials to your {product-short}, add the following key/value pairs to link:{configuring-dynamic-plugins-book-url}#provisioning-your-custom-configuration[your {product-short} secrets]:
+
@@ -67,32 +67,6 @@ To allow the identity provider to automatically determine whether to prompt for
If `prompt: auto` is not set, the identity provider defaults to `prompt: none`, which assumes that you are already logged in and rejects sign-in requests without an active session.
====
-Optional: Consider adding the following optional fields:
-
-`dangerouslyAllowSignInWithoutUserInCatalog: true`::
-+
---
-To enable authentication without requiring to provision users in the {product-short} software catalog.
-
-WARNING: Use this option to explore {product-short} features, but do not use it in production.
-
-.`{my-app-config-file}` fragment with optional field to allow authenticating users absent from the software catalog
-[source,yaml]
-----
-auth:
- environment: production
- providers:
- oidc:
- production:
- metadataUrl: ${AUTH_OIDC_METADATA_URL}
- clientId: ${AUTH_OIDC_CLIENT_ID}
- clientSecret: ${AUTH_OIDC_CLIENT_SECRET}
- prompt: auto
-signInPage: oidc
-dangerouslyAllowSignInWithoutUserInCatalog: true
-----
---
-
`callbackUrl`::
{rhbk} callback URL.
+
@@ -147,7 +121,9 @@ auth:
`signIn`::
`resolvers`:::
-After successful authentication, the user signing in must be resolved to an existing user in the {product-short} catalog. To best match users securely for your use case, consider configuring a specific resolver. Enter the resolver list to override the default resolver: `emailLocalPartMatchingUserEntityName`.
+After successful authentication, the user signing in must be resolved to an existing user in the {product-short} catalog.
+To best match users securely for your use case, consider configuring a specific resolver.
+Enter the resolver list to override the default resolver: `oidcSubClaimMatchingKeycloakUserId`.
+
The authentication provider tries each sign-in resolver in order until it succeeds, and fails if none succeed.
+
@@ -155,10 +131,11 @@ WARNING: In production mode, only configure one resolver to ensure users are sec
`resolver`::::
Enter the sign-in resolver name.
Available values:
+* `oidcSubClaimMatchingKeycloakUserId`
* `emailLocalPartMatchingUserEntityName`
* `emailMatchingUserEntityProfileEmail`
* `preferredUsernameMatchingUserEntityName`
-
++
.`{my-app-config-file}` fragment with optional `resolvers` list
[source,yaml]
----
@@ -168,6 +145,7 @@ auth:
production:
signIn:
resolvers:
+ - resolver: oidcSubClaimMatchingKeycloakUserId
- resolver: preferredUsernameMatchingUserEntityName
- resolver: emailMatchingUserEntityProfileEmail
- resolver: emailLocalPartMatchingUserEntityName
@@ -191,7 +169,7 @@ auth:
clientSecret: ${AUTH_OIDC_CLIENT_SECRET}
signIn:
resolvers:
- - resolver: emailLocalPartMatchingUserEntityName
+ - resolver: oidcSubClaimMatchingKeycloakUserId
dangerouslyAllowSignInWithoutUserInCatalog: true
signInPage: oidc
----
diff --git a/modules/authorization/proc-delegating-rbac-access-api.adoc b/modules/authorization/proc-delegating-rbac-access-api.adoc
new file mode 100644
index 0000000000..dadda8d2cb
--- /dev/null
+++ b/modules/authorization/proc-delegating-rbac-access-api.adoc
@@ -0,0 +1,166 @@
+[id='proc-delegating-rbac-access-api_{context}']
+= Delegating RBAC access in {product} by using API
+
+You can delegate the RBAC access in {product} by using the RBAC backend API.
+
+.Prerequisites
+* You have API access using `curl` or another tool.
+
+.Procedure
+. Create a new role designated for team leads using the RBAC backend API:
++
+--
+.Example of creating a new role for the team lead using the RBAC backend API
+[source,bash]
+----
+curl -X POST 'http://localhost:7007/api/permission/roles' \
+--header "Authorization: Bearer $ADMIN_TOKEN" \
+--header "Content-Type: application/json" \
+--data '{
+ "memberReferences": ["user:default/team_lead"],
+ "name": "role:default/team_lead",
+ "metadata": {
+ "description": "This is an example team lead role"
+ }
+}'
+----
+--
+
+. Allow team leads to read catalog entities and create permissions in the RBAC plugin using the following API request:
++
+--
+.Example of granting the team lead role permission to create RBAC policies and read catalog entities
+[source,bash]
+----
+curl -X POST 'http://localhost:7007/api/permission/policies' \
+--header "Authorization: Bearer $ADMIN_TOKEN" \
+--header "Content-Type: application/json" \
+--data '[
+ {
+ "entityReference": "role:default/team_lead",
+ "permission": "policy.entity.create",
+ "policy": "create",
+ "effect": "allow"
+ },
+ {
+ "entityReference": "role:default/team_lead",
+ "permission": "catalog-entity",
+ "policy": "read",
+ "effect": "allow"
+ }
+]'
+----
+--
+
+. To ensure team leads can only manage what they own, use the `IS_OWNER` conditional rule as follows:
++
+--
+.Example `curl` of applying a conditional access policy using the `IS_OWNER` rule for the team lead role
+[source,bash]
+----
+curl -X POST 'http://localhost:7007/api/permission/roles/conditions' \
+--header "Authorization: Bearer $ADMIN_TOKEN" \
+--header "Content-Type: application/json" \
+--data '{
+ "result": "CONDITIONAL",
+ "pluginId": "permission",
+ "resourceType": "policy-entity",
+ "conditions": {
+ "rule": "IS_OWNER",
+ "resourceType": "policy-entity",
+ "params": {
+ "owners": [
+ "user:default/team_lead"
+ ]
+ }
+ },
+ "roleEntityRef": "role:default/team_lead",
+ "permissionMapping": [
+ "read",
+ "update",
+ "delete"
+ ]
+}'
+----
+The previous example of conditional policy limits visibility and control to only owned roles and policies.
+--
+
+. Log in to {product-very-short} as team lead and verify the following:
++
+--
+.. Use the following request and verify that you do not see any roles:
++
+.Example `curl` to retrieve roles visible to the team lead
+[source,bash]
+----
+curl -X GET 'http://localhost:7007/api/permission/roles' \
+--header "Authorization: Bearer $TEAM_LEAD_TOKEN"
+
+----
+
+.. Use the following request to create a new role for their team:
++
+.Example `curl` of team lead creating a new role for their team with ownership assigned
+[source,bash]
+----
+curl -X POST 'http://localhost:7007/api/permission/roles' \
+--header "Authorization: Bearer $TEAM_LEAD_TOKEN" \
+--header "Content-Type: application/json" \
+--data '{
+ "memberReferences": ["user:default/team_member"],
+ "name": "role:default/team_a",
+ "metadata": {
+ "description": "This is an example team_a role",
+ "owner": "user:default/team_lead"
+ }
+}'
+----
++
+[NOTE]
+====
+You can set the ownership during creation, but you can also update the ownership at any time.
+====
+
+.. Use the following request to assign a permission policy to the new role:
++
+.Example `curl` for granting read access to catalog entities for the new role
+[source,bash]
+----
+curl -X POST 'http://localhost:7007/api/permission/policies' \
+--header "Authorization: Bearer $ADMIN_TOKEN" \
+--header "Content-Type: application/json" \
+--data '[
+ {
+ "entityReference": "role:default/team_a",
+ "permission": "catalog-entity",
+ "policy": "read",
+ "effect": "allow"
+ }
+]'
+----
+
+.. Use the following request to verify that only team-owned roles and policies are visible:
++
+.Example `curl` to retrieve roles and permission policies visible to the team lead
+[source,bash]
+----
+curl -X GET 'http://localhost:7007/api/permission/roles' \
+--header "Authorization: Bearer $TEAM_LEAD_TOKEN"
+
+curl -X GET 'http://localhost:7007/api/permission/policies' \
+--header "Authorization: Bearer $TEAM_LEAD_TOKEN"
+----
+--
+
+.Verification
+* Log in as a team lead and verify the following:
++
+--
+** The RBAC UI is accessible.
+** Only the assigned users or groups are visible.
+** Permissions outside the scoped team are not viewable or editable.
+--
+* Log in as an administrator and verify that you retain full visibility and control.
+
+
+
diff --git a/modules/authorization/proc-delegating-rbac-access-webui.adoc b/modules/authorization/proc-delegating-rbac-access-webui.adoc
new file mode 100644
index 0000000000..ec4b809c50
--- /dev/null
+++ b/modules/authorization/proc-delegating-rbac-access-webui.adoc
@@ -0,0 +1,29 @@
+[id='proc-delegating-rbac-access-webui_{context}']
+= Delegating RBAC access in {product} by using the web UI
+
+You can delegate the RBAC access in {product} by using the web UI.
+
+.Procedure
+. Log in to your {product-very-short} instance with administrator credentials.
+. Navigate to *Administration → RBAC*.
+. Click *Create Role* and define a new role for team leads, such as `role:default/team_lead`.
+. In the *Members* section, add the user or group, such as `user:default/team_lead`.
+. Grant permissions required by team leads, such as:
++
+--
+* `policy.entity.create` to allow policy creation.
+* `catalog-entity:read` to allow catalog access.
+--
+. Apply *conditions* to limit access as follows:
++
+* Use the `IS_OWNER` rule to ensure team leads can only manage resources they own.
+
+. Click *Save* to create the role and apply changes.
+
+.Verification
+* Log in as a team lead.
+* Verify the following:
++
+** RBAC UI is accessible.
+** Only users or roles related to their team are visible.
+** No access to roles or permissions outside their scope is granted.
\ No newline at end of file
diff --git a/modules/authorization/ref-rbac-permission-policies.adoc b/modules/authorization/ref-rbac-permission-policies.adoc
index b1afd13675..800231ed7f 100644
--- a/modules/authorization/ref-rbac-permission-policies.adoc
+++ b/modules/authorization/ref-rbac-permission-policies.adoc
@@ -158,7 +158,7 @@ RBAC permissions::
|Allows a user or role to read permission policies and roles
|`policy.entity.create`
-|`policy-entity`
+|
|`create`
|Allows a user or role to create a single or multiple permission policies and roles
@@ -183,6 +183,16 @@ Kubernetes permissions::
|Policy
|Description
+|`kubernetes.clusters.read`
+|
+|`read`
+|Allows a user to read Kubernetes cluster details under the `/clusters` path
+
+|`kubernetes.resources.read`
+|
+|`read`
+|Allows a user to read information about Kubernetes resources located at `/services/:serviceId` and `/resources`
+
|`kubernetes.proxy`
|
|`use`
@@ -241,13 +251,79 @@ Topology permissions::
|Policy
|Description
-|`topology.view.read`
+|`kubernetes.clusters.read`
+|
+|`read`
+|Allows a user to read Kubernetes cluster details under the `/clusters` path
+
+|`kubernetes.resources.read`
+|
+|`read`
+|Allows a user to read information about Kubernetes resources located at `/services/:serviceId` and `/resources`
+
+|`kubernetes.proxy`
+|
+|`use`
+|Allows a user or role to access the proxy endpoint, allowing the user or role to read pod logs and events within {product-very-short}
+|===
+
+
+Tekton permissions::
+
+.Tekton permissions
+[cols="15%,25%,15%,45%", frame="all", options="header"]
+|===
+|Name
+|Resource type
+|Policy
+|Description
+
+|`kubernetes.clusters.read`
|
|`read`
-|Allows a user or role to view the topology plugin
+|Allows a user to read Kubernetes cluster details under the `/clusters` path
+
+|`kubernetes.resources.read`
+|
+|`read`
+|Allows a user to read information about Kubernetes resources located at `/services/:serviceId` and `/resources`
|`kubernetes.proxy`
|
|`use`
|Allows a user or role to access the proxy endpoint, allowing the user or role to read pod logs and events within {product-very-short}
|===
+
+
+ArgoCD permissions::
+
+.ArgoCD permissions
+[cols="15%,25%,15%,45%", frame="all", options="header"]
+|===
+|Name
+|Resource type
+|Policy
+|Description
+
+|`argocd.view.read`
+|
+|`read`
+|Allows a user to read from the ArgoCD plugin
+|===
+
+
+Quay permissions::
+
+.Quay permissions
+[cols="15%,25%,15%,45%", frame="all", options="header"]
+|===
+|Name
+|Resource type
+|Policy
+|Description
+
+|`quay.view.read`
+|
+|`read`
+|Allows a user to read from the Quay plugin
+|===
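+
+For example, the following sketch grants the Quay read permission to a role by using the RBAC backend API; the role name and token in this sketch are illustrative:
+
+.Example `curl` granting the Quay read permission to a role
+[source,bash]
+----
+curl -X POST 'http://localhost:7007/api/permission/policies' \
+--header "Authorization: Bearer $ADMIN_TOKEN" \
+--header "Content-Type: application/json" \
+--data '[
+  {
+    "entityReference": "role:default/quay_viewer",
+    "permission": "quay.view.read",
+    "policy": "read",
+    "effect": "allow"
+  }
+]'
+----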
diff --git a/modules/configuring-a-floating-action-button/proc-configuring-floating-action-button-as-a-dynamic-plugin.adoc b/modules/configuring-a-floating-action-button/proc-configuring-floating-action-button-as-a-dynamic-plugin.adoc
index 7396212e1f..72b8b4e345 100644
--- a/modules/configuring-a-floating-action-button/proc-configuring-floating-action-button-as-a-dynamic-plugin.adoc
+++ b/modules/configuring-a-floating-action-button/proc-configuring-floating-action-button-as-a-dynamic-plugin.adoc
@@ -11,14 +11,14 @@ You must have sufficient permissions as a platform engineer.
To configure a floating action button as a dynamic plugin, complete any of the following tasks:
-* Specify the `global.floatingactionbutton/config` mount point in your `app-config-dynamic.yaml` file. For example:
+* Specify the `global.floatingactionbutton/config` mount point in your `app-config-dynamic.yaml` file. For example:
+
.Example of a bulk-import plugin as a floating action button
[source,yaml]
----
- package: ./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-bulk-import
disabled: false
- pluginConfig:
+ pluginConfig:
dynamicPlugins:
frontend:
red-hat-developer-hub.backstage-plugin-bulk-import:
@@ -206,90 +206,4 @@ To configure a floating action button as a dynamic plugin, complete any of the f
icon: bulkImportIcon
text: Bulk import
----
-<1> (Required) The import name with an associated component to the mount point.
-
-== Floating action button parameters
-Use the parameters as shown in the following table to configure your floating action button plugin.
-
-.Floating action button parameters
-|===
-| Name | Description | Type | Default value | Required
-
-| `slot`
-| Position of the floating action button. Valid values: `PAGE_END`, `BOTTOM_LEFT`
-| `enum`
-| `PAGE_END`
-| No
-
-| `label`
-| Name of the floating action button
-| `String`
-| Not applicable
-| Yes
-
-| `icon`
-| Icon of the floating action button. Recommended to use filled icons from the link:https://fonts.google.com/icons[Material Design library]. You can also use an svg icon. For example: ``
-| `String`, `React.ReactElement`, `SVG image icon`, `HTML image icon`
-| Not applicable
-| No
-
-| `showLabel`
-| Display of the label next to your icon
-| `Boolean`
-| Not applicable
-| No
-
-| `size`
-| Size of the floating action button
-| `small`, `medium`, `large`
-| `medium`
-| No
-
-| `color`
-| Color of the component. It supports both default and custom theme colors, that are added from the link:https://mui.com/material-ui/customization/palette/#custom-colors[Palette Getting started guide].
-| `default`, `error`, `info`, `inherit`, `primary`, `secondary`, `success`, `warning`
-| `default`
-| No
-
-| `onClick`
-| Performed action when selecting a floating action button
-| `React.MouseEventHandler`
-| Not applicable
-| No
-
-| `to`
-| Link that opens when selecting a floating action button
-| `String`
-| Not applicable
-| No
-
-| `toolTip`
-| Text that appears when hovering over a floating action button
-| `String`
-| Not applicable
-| No
-
-| `priority`
-| Order of the floating action buttons displayed in the submenu. A larger value means higher priority.
-| `number`
-| Not applicable
-| No
-
-| `visibleOnPaths`
-| Display floating action button on the specified paths
-| `string[]`
-| Display floating action button on all paths
-| No
-
-| `excludeOnPaths`
-| Hide floating action button on the specified paths
-| `string[]`
-| Display floating action button on all paths
-| No
-
-|===
-
-[NOTE]
-====
-If multiple floating button actions are assigned to the same `slot` value, the floating buttons are displayed as submenu options within the main floating action button.
-====
\ No newline at end of file
+<1> (Required) The import name with an associated component to the mount point.
\ No newline at end of file
diff --git a/modules/configuring-a-floating-action-button/ref-floating-action-button-parameters.adoc b/modules/configuring-a-floating-action-button/ref-floating-action-button-parameters.adoc
new file mode 100644
index 0000000000..40eaf2d5ec
--- /dev/null
+++ b/modules/configuring-a-floating-action-button/ref-floating-action-button-parameters.adoc
@@ -0,0 +1,88 @@
+:_mod-docs-content-type: REFERENCE
+[id="ref-floating-action-button-parameters_{context}"]
+= Floating action button parameters
+
+Use the parameters as shown in the following table to configure your floating action button plugin.
+
+.Floating action button parameters
+|===
+| Name | Description | Type | Default value | Required
+
+| `slot`
+| Position of the floating action button. Valid values: `PAGE_END`, `BOTTOM_LEFT`
+| `enum`
+| `PAGE_END`
+| No
+
+| `label`
+| Name of the floating action button
+| `String`
+| Not applicable
+| Yes
+
+| `icon`
+| Icon of the floating action button. Recommended to use filled icons from the link:https://fonts.google.com/icons[Material Design library]. You can also use an svg icon. For example: ``
+| `String`, `React.ReactElement`, `SVG image icon`, `HTML image icon`
+| Not applicable
+| No
+
+| `showLabel`
+| Display of the label next to your icon
+| `Boolean`
+| Not applicable
+| No
+
+| `size`
+| Size of the floating action button
+| `small`, `medium`, `large`
+| `medium`
+| No
+
+| `color`
+| Color of the component. It supports both default and custom theme colors, which you can add as described in the link:https://mui.com/material-ui/customization/palette/#custom-colors[Palette Getting started guide].
+| `default`, `error`, `info`, `inherit`, `primary`, `secondary`, `success`, `warning`
+| `default`
+| No
+
+| `onClick`
+| Performed action when selecting a floating action button
+| `React.MouseEventHandler`
+| Not applicable
+| No
+
+| `to`
+| Link that opens when selecting a floating action button
+| `String`
+| Not applicable
+| No
+
+| `toolTip`
+| Text that appears when hovering over a floating action button
+| `String`
+| Not applicable
+| No
+
+| `priority`
+| Order of the floating action buttons displayed in the submenu. A larger value means higher priority.
+| `number`
+| Not applicable
+| No
+
+| `visibleOnPaths`
+| Display floating action button on the specified paths
+| `string[]`
+| Display floating action button on all paths
+| No
+
+| `excludeOnPaths`
+| Hide floating action button on the specified paths
+| `string[]`
+| Display floating action button on all paths
+| No
+
+|===
+
+[NOTE]
+====
+If multiple floating button actions are assigned to the same `slot` value, the floating buttons are displayed as submenu options within the main floating action button.
+====
\ No newline at end of file
diff --git a/modules/configuring-external-databases/proc-configuring-mount-paths.adoc b/modules/configuring-external-databases/proc-configuring-mount-paths.adoc
new file mode 100644
index 0000000000..42d9279d4b
--- /dev/null
+++ b/modules/configuring-external-databases/proc-configuring-mount-paths.adoc
@@ -0,0 +1,40 @@
+:_mod-docs-content-type: PROCEDURE
+[id="proc-configuring-mount-paths_{context}"]
+= Configuring mount paths for Secrets and PVCs
+
+If you do not define a mount path, it defaults to the working directory of the {product-short} container, which is `/opt/app-root/src`.
+
+.Procedure
+
+. To specify a PVC mount path, add the `rhdh.redhat.com/mount-path` annotation to your configuration file as shown in the following example:
++
+.Example specifying where the PVC mounts
+[source,yaml,subs="+attributes,+quotes"]
+----
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: __
+ annotations:
+ rhdh.redhat.com/mount-path: /mount/path/from/annotation
+----
+where:
+
+`rhdh.redhat.com/mount-path`:: Specifies which mount path the PVC mounts to (in this case, `/mount/path/from/annotation` directory).
+__:: Specifies the PVC to mount.
+
+. To specify a Secret mount path, add the `rhdh.redhat.com/mount-path` annotation to your configuration file as shown in the following example:
++
+.Example specifying where the Secret mounts
+[source,yaml,subs="+attributes,+quotes"]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+ name: __
+ annotations:
+ rhdh.redhat.com/mount-path: /mount/path/from/annotation
+----
+where:
+
+__:: Specifies the Secret name.
\ No newline at end of file
diff --git a/modules/configuring-external-databases/proc-mounting-to-specific-containers.adoc b/modules/configuring-external-databases/proc-mounting-to-specific-containers.adoc
new file mode 100644
index 0000000000..b5f04c1b04
--- /dev/null
+++ b/modules/configuring-external-databases/proc-mounting-to-specific-containers.adoc
@@ -0,0 +1,43 @@
+:_mod-docs-content-type: PROCEDURE
+[id="proc-mounting-to-specific-containers_{context}"]
+= Mounting Secrets and PVCs to specific containers
+
+By default, Secrets and PVCs mount only to the {product} `backstage-backend` container. You can add the `rhdh.redhat.com/containers` annotation to your configuration file to specify the containers to mount to.
+
+.Procedure
+
+. To mount Secrets to *all* containers, set the `rhdh.redhat.com/containers` annotation to `*` in your configuration file:
++
+.Example mounting to all containers
+[source,yaml,subs="+attributes,+quotes"]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+ name: __
+ annotations:
+ rhdh.redhat.com/containers: "*"
+----
++
+[IMPORTANT]
+====
+Set `rhdh.redhat.com/containers` to `*` to mount it to all containers in the deployment.
+====
+
+. To mount to specific containers, separate the names with commas:
++
+.Example separating the list of containers
+[source,yaml,subs="+attributes,+quotes"]
+----
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: __
+ annotations:
+ rhdh.redhat.com/containers: "init-dynamic-plugins,backstage-backend"
+----
++
+[NOTE]
+====
+This configuration mounts the `__` PVC to the `init-dynamic-plugins` and `backstage-backend` containers.
+====
\ No newline at end of file
diff --git a/modules/configuring-the-global-header/proc-customize-rhdh-global-header.adoc b/modules/configuring-the-global-header/proc-customize-rhdh-global-header.adoc
index a7a3d4c69a..ac52bbcfa5 100644
--- a/modules/configuring-the-global-header/proc-customize-rhdh-global-header.adoc
+++ b/modules/configuring-the-global-header/proc-customize-rhdh-global-header.adoc
@@ -2,7 +2,7 @@
= Customizing your {product} global header
You can use the `red-hat-developer-hub.backstage-plugin-global-header` dynamic plugin to extend the global header with additional buttons and customize the order and position of icons and features. Additionally, you can create and integrate your custom dynamic header plugins using the mount points provided by this new header feature, allowing you to further tailor to suit your needs.
-For more information on enabling dynamic plugins, see link:{installing-and-viewing-dynamic-plugins-url}[{installing-and-viewing-dynamic-plugins-title}].
+For more information about enabling dynamic plugins, see link:{installing-and-viewing-plugins-book-url}[{installing-and-viewing-plugins-book-title}].
.Default global header configuration
@@ -42,7 +42,7 @@ For more information on enabling dynamic plugins, see link:{installing-and-viewi
config:
priority: 90
props:
- title: Create...
+ title: Self-service
icon: add
to: create
- mountPoint: global.header/component
@@ -76,7 +76,7 @@ For more information on enabling dynamic plugins, see link:{installing-and-viewi
----
<1> *search*: Hides the *Search* modal in the sidebar menu. Change it to `true` to display the *Search* modal in the sidebar.
<2> *settings*: Hides the *Settings* button in the sidebar menu. Change it to `true` to display the *Settings* button in the sidebar.
-<3> `default.main-menu-items`: Hides the *Create* button from the sidebar menu. Remove this field to display the *Create* button in the sidebar.
+<3> `default.main-menu-items`: Hides the *Self-service* button from the sidebar menu. Remove this field to display the *Self-service* button in the sidebar.
<4> *position*: Defines the position of the header. Options: `above-main-content` or `above-sidebar`.
To extend the functionality of the default global header, include any the following attributes in your global header entry:
@@ -91,16 +91,16 @@ The `red-hat-developer-hub.backstage-plugin-global-header` package (enabled by d
- **`SearchComponent`**: Adds a search bar (enabled by default).
- **`Spacer`**: Adds spacing in the header to position buttons at the end. Useful when you disable `SearchComponent`.
-- **`HeaderIconButton`**: Adds an icon button. By default, the *Create* icon button remains enabled.
+- **`HeaderIconButton`**: Adds an icon button. By default, the *Self-service* icon button remains enabled.
- **`SupportButton`**: Adds a *Support* icon button, allowing users to configure a link to an internal or external page. Enabled by default but requires additional configuration to display.
- **`NotificationButton`**: Adds a *Notifications* icon button to display unread notifications in real time and navigate to the *Notifications* page. Enabled by default (requires the notifications plugin).
- **`Divider`**: Adds a vertical divider. By default, a divider appears between the profile dropdown and other header components.
- **`ProfileDropdown`**: Adds a profile dropdown showing the logged-in user's name. By default, it contains two menu items.
- **`MenuItemLink`**: Adds a link item in a dropdown menu. By default, the profile dropdown includes a link to the *Settings* page.
- **`LogoutButton`**: Adds a logout button in the profile dropdown (enabled by default).
-- **`CreateDropdown`**: Adds a *Create* dropdown button (disabled by default). The menu items are configurable.
-- **`SoftwareTemplatesSection`**: Adds a list of software template links to the *Create* dropdown menu (disabled by default). You must enable `CreateDropdown`.
-- **`RegisterAComponentSection`**: Adds a link to the *Register a Component* page in the *Create* dropdown menu (disabled by default). You must enable `CreateDropdown`.
+- **`CreateDropdown`**: Adds a *Self-service* dropdown button (disabled by default). The menu items are configurable.
+- **`SoftwareTemplatesSection`**: Adds a list of software template links to the *Self-service* dropdown menu (disabled by default). You must enable `CreateDropdown`.
+- **`RegisterAComponentSection`**: Adds a link to the *Register a Component* page in the *Self-service* dropdown menu (disabled by default). You must enable `CreateDropdown`.
`config.position`::
Specifies the position of the header. Supported values are `above-main-content` and `above-sidebar`.
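+
+For example, the following sketch adds the *Self-service* dropdown button through the `global.header/component` mount point; the structure follows the default global header configuration shown above, and the priority value is illustrative:
+
+[source,yaml]
+----
+dynamicPlugins:
+  frontend:
+    red-hat-developer-hub.backstage-plugin-global-header:
+      mountPoints:
+        - mountPoint: global.header/component
+          importName: CreateDropdown
+          config:
+            priority: 90
+----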
diff --git a/modules/configuring/proc-enabling-the-rhdh-plugin-assets-cache.adoc b/modules/configuring/proc-enabling-the-rhdh-plugin-assets-cache.adoc
new file mode 100644
index 0000000000..add80975a4
--- /dev/null
+++ b/modules/configuring/proc-enabling-the-rhdh-plugin-assets-cache.adoc
@@ -0,0 +1,37 @@
+[id="enabling-the-rhdh-plugin-assets-cache_{context}"]
+= Enabling the {product} plugin assets cache
+
+By default, {product} does not cache plugin assets.
+You can use a Redis cache store to improve {product-short} performance and reliability.
+Configured plugins in {product-short} receive dedicated cache connections, which are powered by the Keyv Redis client.
+
+.Prerequisites
+* You have installed {product}.
+* You have an active Redis server.
+For more information on setting up an external Redis server, see the link:https://www.redis.io/docs/latest/[official Redis documentation].
+
+.Procedure
+. Enable the {product-short} cache by defining Redis as the cache store type and entering your Redis server connection URL in your `{my-app-config-file}` file.
++
+.`{my-app-config-file}` file fragment
+[source,yaml,subs="+quotes"]
+----
+backend:
+ cache:
+ store: redis
+ connection: redis://user:pass@cache.example.com:6379
+----
+
+. Enable the cache for TechDocs by adding the `techdocs.cache.ttl` setting in your `{my-app-config-file}` file.
+This setting specifies how long, in milliseconds, a statically built asset should stay in the cache.
++
+.`{my-app-config-file}` file fragment
+[source,yaml]
+----
+techdocs:
+ cache:
+ ttl: 3600000
+----
+
+TIP: Optionally, you can enable the cache for unsupported plugins that support caching.
+See their respective documentation for details.
diff --git a/modules/configuring/proc-provisioning-your-custom-configuration.adoc b/modules/configuring/proc-provisioning-your-custom-configuration.adoc
index b55374b737..f4f41608fe 100644
--- a/modules/configuring/proc-provisioning-your-custom-configuration.adoc
+++ b/modules/configuring/proc-provisioning-your-custom-configuration.adoc
@@ -20,8 +20,14 @@ It contains one secret per line in `KEY=value` form.
. Author your custom `{my-app-config-file}` file.
This is the main {product-short} configuration file.
-+
-The `baseUrl` field is mandatory in your `{my-app-config-file}` file to ensure proper functionality of {product-short}. You must specify the `baseUrl` in both the `app` and `backend` sections to avoid errors during initialization.
+You need a custom `{my-app-config-file}` file to prevent the {product-short} installer from reverting user edits during upgrades.
+When your custom `{my-app-config-file}` file is empty, {product-short} uses the default values.
+
+** To prepare a deployment with the {product} Operator on {ocp-short}, you can start with an empty file.
+
+** To prepare a deployment with the {product} Helm chart, or on Kubernetes, enter the {product-short} base URL in the relevant fields in your `{my-app-config-file}` file to ensure proper functionality of {product-short}.
+The base URL is what a {product-short} user sees in their browser when accessing {product-short}.
+The relevant fields are `baseUrl` in the `app` and `backend` sections, and `origin` in the `backend.cors` subsection:
+
.Configuring the `baseUrl` in `{my-app-config-file}`
====
@@ -43,13 +49,13 @@ backend:
origin: {my-product-url}
----
====
-+
-Optionally, enter your configuration such as:
-* link:{authentication-book-url}[{authentication-book-title}].
-* link:{authorization-book-url}[{authorization-book-title}].
-* link:{customizing-book-url}[Customization].
-* xref:proc-configuring-an-rhdh-instance-with-tls-in-kubernetes_running-behind-a-proxy[Configure your {ocp-short} integration].
+** Optionally, enter your configuration such as:
+
+*** link:{authentication-book-url}[{authentication-book-title}].
+*** link:{authorization-book-url}[{authorization-book-title}].
+*** link:{customizing-book-url}[Customization].
+*** xref:proc-configuring-an-rhdh-instance-with-tls-in-kubernetes_running-behind-a-proxy[Configure your {ocp-short} integration].
. Provision your custom configuration files to your {ocp-short} cluster.
diff --git a/modules/customizing-templates/proc-adding-templates.adoc b/modules/customizing-templates/proc-adding-templates.adoc
index 837b3c9e34..d71bfd491d 100644
--- a/modules/customizing-templates/proc-adding-templates.adoc
+++ b/modules/customizing-templates/proc-adding-templates.adoc
@@ -4,9 +4,9 @@
:_mod-docs-content-type: PROCEDURE
[id="proc-adding-templates_{context}"]
-= Importing an existing template to {product}
+= Importing an existing Software Template to {product}
-You can add an existing template to your {product} instance by using the Catalog Processor.
+You can add an existing Software Template to your {product} instance by using the Catalog Processor.
.Prerequisites
@@ -15,7 +15,7 @@ You can add an existing template to your {product} instance by using the Catalog
.Procedure
-* In the `{my-app-config-file}` configuration file, modify the `catalog.rules` section to include a rule for templates, and configure the `catalog.locations` section to point to the template that you want to add, as shown in the following example:
+* In the `{my-app-config-file}` configuration file, modify the `catalog.rules` section to include a rule for Software Templates, and configure the `catalog.locations` section to point to the Software Template that you want to add, as shown in the following example:
+
[source,yaml]
----
@@ -28,7 +28,7 @@ catalog:
target: https:///example-template.yaml # <3>
# ...
----
-<1> To allow new templates to be added to the catalog, you must add a `Template` rule.
+<1> To allow new Software Templates to be added to the catalog, you must add a `Template` rule.
<2> If you are importing templates from a repository, such as GitHub or GitLab, use the `url` type.
<3> Specify the URL for the template.
diff --git a/modules/customizing-templates/proc-creating-a-new-software-component-using-templates.adoc b/modules/customizing-templates/proc-creating-a-new-software-component-using-templates.adoc
new file mode 100644
index 0000000000..b202252a6f
--- /dev/null
+++ b/modules/customizing-templates/proc-creating-a-new-software-component-using-templates.adoc
@@ -0,0 +1,40 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-configuring-templates.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-creating-a-new-software-component-using-templates_{context}"]
+= Creating a new software component using Software Templates
+
+You can create a new software component using the standard Software Templates that the platform engineers have created. The scaffolding process runs in your {product} instance.
+
+.Procedure
+
+. In your {product} navigation menu, click *Catalog* > *Self-service*.
+. On the *Self-service* page, click *Choose* on the *Templates* tile to initiate the scaffolding process for a template.
+. Follow the wizard instructions as you enter the required details. You can choose parameters from a set of pre-defined options.
+* Optional: In the *Deployment Information* step, you have an option to *Create Workbench for OpenShift AI*.
++
+[NOTE]
+====
+This step is available only for a few templates.
+====
+. In the *Review* step, verify the parameters you have entered and click *Create*.
++
+[NOTE]
+====
+* You can click *Cancel* to abort the creation of the software component while the template is running, but only if the current step supports aborting. The abort signal is then sent to the task, and none of the following steps are executed.
+* During the creation of the software component, click *Show Logs* to view the log information.
+====
+
+.Verification
+
+* If your software component is not created successfully, you can review the logs on the error page. To return to the *Self-service* page with the same template form and your previously entered values, click *Start Over*.
+
+image::rhdh/template-creation-not-successful.png[]
+
+* If your software component is created successfully, a success page similar to the example in the following image is displayed:
++
+--
+image::rhdh/template-creation-successful.png[template-creation-successful]
+--
diff --git a/modules/customizing-templates/proc-creating-templates.adoc b/modules/customizing-templates/proc-creating-templates.adoc
index 8113d724ea..e690c76850 100644
--- a/modules/customizing-templates/proc-creating-templates.adoc
+++ b/modules/customizing-templates/proc-creating-templates.adoc
@@ -4,48 +4,45 @@
:_mod-docs-content-type: PROCEDURE
[id="proc-creating-templates_{context}"]
-= Creating a template by using the Template Editor
+= Creating a Software Template by using the Template Editor
-You can create a template by using the Template Editor.
+You can create a Software Template by using the Template Editor.
+
+Alternatively, you can use the Template Editor to do any of the following actions:
+
+* *File > Open template directory*
+* *File > Create template directory*
+* *File > Close template editor*
+* Use the *Custom Fields Explorer* button to test custom fields in your `templates.yaml` file
+* View *Installed Actions Documentation*
.Procedure
-. Access the Template Editor by using one of the following options:
+To create a Software Template by using the Template Editor, complete the following steps:
+
+. In your {product} navigation menu, click *Catalog > Self-service*.
+. Click the overflow menu and select *Manage Templates*.
+
image::rhdh/template-editor.png[Template Editor]
-** Open the URL `\https:///create/edit` for your {product} instance.
-** Click *Create...* in the navigation menu of the {product} console, then click the overflow menu button and select *Template editor*.
-. Click *Edit Template Form*.
-. Optional: Modify the YAML definition for the parameters of your template. For more information about these parameters, see <>.
-. In the *Name ** field, enter a unique name for your template.
-. From the *Owner* drop-down menu, choose an owner for the template.
-. Click *Next*.
-. In the *Repository Location* view, enter the following information about the hosted repository that you want to publish the template to:
-.. Select an available *Host* from the drop-down menu.
+
---
+
[NOTE]
====
-Available hosts are defined in the YAML parameters by the `allowedHosts` field:
-
-.Example YAML
-[source,yaml]
-----
-# ...
- ui:options:
- allowedHosts:
- - github.com
-# ...
-----
-
+* The following options on the *Managed Templates* page do not create a software component in your {product} instance:
+** *Template Form Playground*: Use to create and test the `templates.yaml` file
+** *Custom Field Explorer*: Use to test custom fields
====
---
-
-.. In the *Owner ** field, enter an organization, user or project that the hosted repository belongs to.
-.. In the *Repository ** field, enter the name of the hosted repository.
-.. Click *Review*.
-. Review the information for accuracy, then click *Create*.
+. On the *Managed Templates* page, select any of the following options:
+* *Load Template Directory*: Use to load an existing `templates.yaml` file
+** In your local file manager, navigate to the folder where your `templates.yaml` file is stored and click *Select*.
+* *Create New Template*: Use to create a `templates.yaml` file
+** In your local file manager, navigate to the folder where you want the Template Editor to create a `templates.yaml` file and click *Select*.
+.. On the *Template Editor* page, select the `templates.yaml` file.
+.. On the *Fill in some steps* tab, enter text into the required fields and click *Next*.
+.. On the *Repository Location* tab, enter text into the required fields and click *Review*.
+.. Optional: Modify the YAML definition for the parameters of your template. For more information about these parameters, see <>.
+.. Review the information for accuracy, then click *Create*.
.Verification
diff --git a/modules/customizing-templates/proc-searching-and-filtering-software-templates.adoc b/modules/customizing-templates/proc-searching-and-filtering-software-templates.adoc
new file mode 100644
index 0000000000..e549c50d59
--- /dev/null
+++ b/modules/customizing-templates/proc-searching-and-filtering-software-templates.adoc
@@ -0,0 +1,17 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-configuring-templates.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-searching-and-filtering-software-templates_{context}"]
+= Searching and filtering Software Templates in your {product} instance
+
+You can search and filter for the Software Template that you want to use to create a new software component.
+
+.Procedure
+
+To search and filter for a Software Template, complete the following steps:
+
+. In the {product} navigation menu, click *Catalog* > *Self-service*.
+. Type the name of the template you are looking for in the *Search* box.
+* If you are looking for templates in a certain category, you can use the *Categories* dropdown.
\ No newline at end of file
diff --git a/modules/customizing-templates/ref-creating-templates.adoc b/modules/customizing-templates/ref-creating-templates.adoc
index d894a74ace..c6d5ac2e84 100644
--- a/modules/customizing-templates/ref-creating-templates.adoc
+++ b/modules/customizing-templates/ref-creating-templates.adoc
@@ -4,11 +4,11 @@
:_mod-docs-content-type: REFERENCE
[id="ref-creating-templates_{context}"]
-= Creating a template as a YAML file
+= Creating a Software Template as a YAML file
-You can create a template by defining a `Template` object as a YAML file.
+You can create a Software Template by defining a `Template` object as a YAML file.
-The `Template` object describes the template and its metadata. It also contains required input variables and a list of actions that are executed by the scaffolding service.
+The `Template` object describes the Software Template and its metadata. It also contains required input variables and a list of actions that are executed by the scaffolding service.
.`Template` object example
[source,yaml]
@@ -56,10 +56,10 @@ spec:
entityRef: ${{ steps['register'].output.entityRef }}
# ...
----
-<1> Specify a name for the template.
-<2> Specify a title for the template. This is the title that is visible on the template tile in the *Create...* view.
-<3> Specify a description for the template. This is the description that is visible on the template tile in the *Create...* view.
-<4> Specify the ownership of the template. The `owner` field provides information about who is responsible for maintaining or overseeing the template within the system or organization. In the provided example, the `owner` field is set to `backstage/techdocs-core`. This means that this template belongs to the `techdocs-core` project in the `backstage` namespace.
+<1> Specify a name for the Software Template.
+<2> Specify a title for the Software Template. This is the title that is visible on the Software Template tile in the *Self-service* view.
+<3> Specify a description for the Software Template. This is the description that is visible on the Software Template tile in the *Self-service* view.
+<4> Specify the ownership of the Software Template. The `owner` field provides information about who is responsible for maintaining or overseeing the Software Template within the system or organization. In the provided example, the `owner` field is set to `backstage/techdocs-core`. This means that this Software Template belongs to the `techdocs-core` project in the `backstage` namespace.
<5> Specify the component type. Any string value is accepted for this required field, but your organization should establish a proper taxonomy for these. {product} instances may read this field and behave differently depending on its value. For example, a `website` type component may present tooling in the {product} interface that is specific to just websites.
+
The following values are common for this field:
@@ -69,7 +69,7 @@ The following values are common for this field:
`website`:: A website.
`library`:: A software library, such as an npm module or a Java library.
--
-<6> Use the `parameters` section to specify parameters for user input that are shown in a form view when a user creates a component by using the template in the {product} console. Each `parameters` subsection, defined by a title and properties, creates a new form page with that definition.
+<6> Use the `parameters` section to specify parameters for user input that are shown in a form view when a user creates a component by using the Software Template in the {product} console. Each `parameters` subsection, defined by a title and properties, creates a new form page with that definition.
<7> Use the `steps` section to specify steps that are executed in the backend. These steps must be defined by using a unique step ID, a name, and an action. You can view actions that are available on your {product} instance by visiting the URL `\https:///create/actions`.
<8> Use the `output` section to specify the structure of output data that is created when the template is used. The `output` section, particularly the `links` subsection, provides valuable references and URLs that users can utilize to access and interact with components that are created from the template.
<9> Provides a reference or URL to the repository associated with the generated component.
diff --git a/modules/customizing-the-appearance/con-customize-rhdh-sidebar-menuitems.adoc b/modules/customizing-the-appearance/con-customize-rhdh-sidebar-menuitems.adoc
new file mode 100644
index 0000000000..2c8936032b
--- /dev/null
+++ b/modules/customizing-the-appearance/con-customize-rhdh-sidebar-menuitems.adoc
@@ -0,0 +1,9 @@
+[id='con-customize-rhdh-sidebar-menuitems_{context}']
+= Customizing the sidebar menu items for your {product-short} instance
+
+The sidebar menu in {product} consists of two main parts that you can configure:
+
+Dynamic plugin menu items:: Menu items that your active plugins contribute. These items are displayed beneath the main menu items and change based on your preferences and installed plugins.
+Main menu items:: The static core navigation structure of the sidebar.
\ No newline at end of file
diff --git a/modules/customizing-the-appearance/proc-configuring-dynamic-plugin-menuitem.adoc b/modules/customizing-the-appearance/proc-configuring-dynamic-plugin-menuitem.adoc
new file mode 100644
index 0000000000..245ef4e200
--- /dev/null
+++ b/modules/customizing-the-appearance/proc-configuring-dynamic-plugin-menuitem.adoc
@@ -0,0 +1,65 @@
+[id='proc-configuring-dynamic-plugin-menuitem_{context}']
+= Configuring a dynamic plugin menu item for your {product-short} instance
+
+Configure a dynamic plugin menu item using the following step:
+
+.Procedure
+
+* In the `{my-app-config-file}` file, update the `menuItems` section of your __ plugin. For example:
++
+[source,yaml]
+----
+dynamicPlugins:
+ frontend:
+ __: # <1>
+ menuItems:
+ : # <2>
+ icon: # home | group | category | extension | school | __ # <3>
+ title: __ # <4>
+ priority: 10 # <5>
+ parent: favorites # <6>
+ enabled: true # <7>
+----
+<1> `__`: Enter the plugin name. This name is the same as the `scalprum.name` key in the `package.json` file.
+<2> `__`: Enter a unique name in the main sidebar navigation for either a standalone menu item or a parent menu item. If this field specifies a plugin menu item, the name of the menu item must match the name used in the corresponding path in `dynamicRoutes`. For example, if `dynamicRoutes` defines `path: /my-plugin`, then `menu_item_name` must be defined as `my-plugin`.
+<3> `icon`: (Optional) Enter the icon name. You can use any of the following icons:
+ ** Default icons, such as `home`, `group`, `category`, `extension`, and `school`. To use a default icon, set the icon to an empty string (`" "`).
+ ** A custom icon, where __ specifies the name of your custom icon.
+ ** An SVG icon, such as: `icon: `
+ ** An HTML image, such as: `icon: https://img.icons8.com/ios-glyphs/20/FFFFFF/shop.png`
+<4> `title`: (Optional) Enter the menu item title. Omit it when the title is already specified in the `dynamicRoutes` configuration under `menuItem.text`. To hide the title from the sidebar, set the title to an empty string (`" "`).
+// Update <4> for release 1.6 as this option (currently a workaround) would be added as a functionality. RHIDP-6333.
+<5> `priority`: (Optional) Sets the order in which menu items appear in the sidebar. The default priority is 0, which places the item at the bottom of the list. A higher priority value places the item higher in the sidebar. You can define this field for each section.
+<6> `parent`: (Optional) Enter the parent menu item under which the current item is nested. If this field is used, the parent menu item must be defined elsewhere in the `menuItems` configuration of any enabled plugin. You can define this field for each section.
+<7> `enabled`: (Optional) To hide the menu item from the sidebar, set the value to `false`. To display the menu item in the sidebar, set the value to `true`.
+
++
+.Example `menuItems` configuration
+[source,yaml,subs="+attributes"]
+----
+dynamicPlugins:
+ frontend:
+ __:
+ dynamicRoutes:
+ - path: /my-plugin
+ module: CustomModule
+ importName: FooPluginPage
+ menuItem:
+ icon: fooIcon
+ text: Foo Plugin Page
+ menuItems:
+ my-plugin: # <1>
+ priority: 10 # <2>
+ parent: favorites # <3>
+ favorites: # <4>
+ icon: favorite # <5>
+ title: Favorites # <6>
+ priority: 100 # <7>
+----
+<1> `my-plugin`: Matches the value of the `path` field in `dynamicRoutes`.
+<2> `priority`: Controls order of plugins under the parent menu item.
+<3> `parent`: Nests this plugin under the `favorites` parent menu item.
+<4> `favorites`: Configuration for the parent menu item.
+<5> `icon`: Displays the `favorite` icon from the {product-very-short} system icons.
+<6> `title`: Displays the title name for the parent menu item.
+<7> `priority`: Order of the `favorites` menu item in the sidebar.
\ No newline at end of file
diff --git a/modules/customizing-the-appearance/proc-customize-rhdh-branding-logo.adoc b/modules/customizing-the-appearance/proc-customize-rhdh-branding-logo.adoc
index fb8f92fc59..b6c8cc44f0 100644
--- a/modules/customizing-the-appearance/proc-customize-rhdh-branding-logo.adoc
+++ b/modules/customizing-the-appearance/proc-customize-rhdh-branding-logo.adoc
@@ -5,9 +5,9 @@
[id="proc-customize-rhdh-branding-logo_{context}"]
= Customizing the branding logo of your {product-short} instance
-You can customize the branding logo of your {product-short} instance by configuring the `branding` section the `{my-app-config-file}` file, as shown in the following example:
+You can customize the branding logo of your {product-short} instance by configuring the `branding` section in the `{my-app-config-file}` file, as shown in the following example:
-[source,yaml]
+[source,yaml,subs="+quotes"]
----
app:
branding:
@@ -19,6 +19,22 @@ where:
<1> `fullLogo` is the logo on the expanded (pinned) sidebar and expects a base64 encoded image.
<2> `iconLogo` is the logo on the collapsed (unpinned) sidebar and expects a base64 encoded image.
++
+You can format the `BASE64_EMBEDDED_FULL_LOGO` environment variable as follows:
++
+[source,yaml,subs="+quotes"]
+----
+BASE64_EMBEDDED_FULL_LOGO: "data:__;base64,__"
+----
++
+The following example demonstrates how to customize the `BASE64_EMBEDDED_FULL_LOGO` using the `data:__;base64,__` format:
++
+[source,terminal]
+----
+SVGLOGOBASE64=$(base64 -i logo.svg)
+BASE64_EMBEDDED_FULL_LOGO="data:image/svg+xml;base64,$SVGLOGOBASE64"
+----
+Replace `image/svg+xml` with the correct media type for your image (for example, `image/png` or `image/jpeg`), and adjust the file extension accordingly. As a result, you can embed the logo directly without referencing an external file.
You can also customize the width of the branding logo by setting a value for the `fullLogoWidth` field in the `branding` section, as shown in the following example:
diff --git a/modules/customizing-the-appearance/proc-customize-rhdh-sidebar-menuitems.adoc b/modules/customizing-the-appearance/proc-customize-rhdh-sidebar-menuitems.adoc
index ce6f7ece12..3bd66c95e5 100644
--- a/modules/customizing-the-appearance/proc-customize-rhdh-sidebar-menuitems.adoc
+++ b/modules/customizing-the-appearance/proc-customize-rhdh-sidebar-menuitems.adoc
@@ -1,19 +1,10 @@
[id='proc-customize-rhdh-sidebar-menuitems_{context}']
= Customizing the sidebar menu items for your {product-short} instance
-The sidebar menu in {product} consists of two main parts that you can configure:
-
-Dynamic plugin menu items:: Your preferences and your active plugins define dynamically one part of the sidebar menu.
-Main menu items:: The core navigation structure of sidebar is static.
-
-* *Dynamic plugin menu items*: These items are displayed beneath the main menu and can be customized based on the plugins installed. The main menu items section is dynamic and can change based on your preferences and installed plugins.
+Customize the main menu items using the following steps:
.Procedure
-
-. Customize the main menu items using the following steps:
-+
---
-.. Open the `{my-app-config-file}` file.
+. Open the `{my-app-config-file}` file.
.. To customize the order and parent-child relationships for the main menu items, use the `dynamicPlugins.frontend.default.main-menu-items.menuItems` field.
.. For dynamic plugin menu items, use the `dynamicPlugins.frontend..menuItems` field.
@@ -28,149 +19,34 @@ dynamicPlugins:
title: Home
icon: home
priority: 100
- default.my-group:
+ enabled: true
+ default.my-group:
title: My Group
icon: group
priority: 90
+ enabled: true
default.catalog:
title: Catalog
icon: category
to: catalog
priority: 80
+ enabled: true
default.apis:
title: APIs
icon: extension
to: api-docs
priority: 70
+ enabled: true
default.learning-path:
title: Learning Paths
icon: school,
to: learning-paths
priority: 60
+ enabled: true
default.create:
- title: Create
+ title: Self-service
icon: add
to: create
priority: 50
-----
-
-.Procedure
-
-. To configure a dynamic plugin menu item, update the `menuItems` section of your __ plugin to your `{my-app-config-file}` file. For example:
-+
-[source,yaml]
-----
-dynamicPlugins:
- frontend:
- __: # <1>
- menuItems:
- : # <2>
- icon: # home | group | category | extension | school | __ # <3>
- title: __ # <4>
- priority: 10 # <5>
- parent: favorites # <6>
-----
-<1> `__`: Enter the plugin name. This name is the same as the `scalprum.name` key in the `package.json` file.
-<2> `__`: Enter a unique name in the main sidebar navigation for either a standalone menu item or a parent menu item. If this field specifies a plugin menu item, the name of the menu item must match the name using in the corresponding path in `dynamicRoutes`. For example, if `dynamicRoutes` defines `path: /my-plugin`, then `menu_item_name` must be defined as `my-plugin`.
-<3> `icon`: (Optional) Enter the icon name. You can use any of the following icons:
- * Default icons, such as `home`, `group`, `category`, `extension`, and `school`. To use default icons, set the icon as an (`" "`) empty string.
- * A custom icon, where __ specifies the name of your custom icon
- * An SVG icon, such as: `icon: `
- * An HTML image, such as: `icon: https://img.icons8.com/ios-glyphs/20/FFFFFF/shop.png`
-<4> `title`: (Optional) Enter the menu item title. Omit it when the title is already specified in the `dynamicRoutes` configuration under `menuItem.text`. To hide the title from the sidebar, set the title as an (`" "`) empty string.
-// Update <4> for release 1.6 as this option (currently a workaround) would be added as a functionality. RHIDP-6333.
-<5> `priority`: (Optional) Sets the order in which menu items appear in the sidebar. The default priority is 0, which places the item at the bottom of the list. A higher priority value places the item higher in the sidebar. You can define this field for each section.
-<6> `parent`: (Optional) Enter the parent menu item under which the current item is nested. If this field is used, the parent menu item must be defined elsewhere in the `menuItems` configuration of any enabled plugin. You can define this field for each section.
-
-+
-.Example `menuItems` configuration
-[source,yaml,subs="+attributes"]
-----
-dynamicPlugins:
- frontend:
- __:
- dynamicRoutes:
- - path: /my-plugin
- module: CustomModule
- importName: FooPluginPage
- menuItem:
- icon: fooIcon
- text: Foo Plugin Page
- menuItems:
- my-plugin: # <1>
- priority: 10 # <2>
- parent: favorites # <3>
- favorites: # <4>
- icon: favorite # <5>
- title: Favorites # <6>
- priority: 100 # <7>
-----
-<1> `my-plugin`: Matches the value of the `path` field in `dynamicRoutes`.
-<2> `priority`: Controls order of plugins under the parent menu item.
-<3> `parent`: Nests this plugin under the `favorites` parent menu item.
-<4> `favorites`: Configuration for the parent menu item.
-<5> `icon`: Displays the `favorite` icon from the {product-very-short} system icons.
-<6> `title`: Displays the title name for the parent menu item.
-<7> `priority`: Order of the `favourites` menu item in the sidebar.
-
-
-. To modify a main menu item or add a custom menu item, add a section to the `default.main-menu-items` > `menuItems` section in your `{my-app-config-file}` file. Use the `default.` prefix to identify the key as a main menu item.
-+
-[source,yaml]
-----
-dynamicPlugins:
- frontend:
- default.main-menu-items:
- menuItems:
- default.__: # <1>
- icon: # home | group | category | extension | school | __ # <2>
- title: __ # <3>
- priority: 10 # <4>
- default.__: # <5>
- parent: __ # <6>
- icon: # home | group | category | extension | school | __ # <7>
- title: __ # <8>
- to: __ # <9>
- priority: 100 # <10>
-----
-<1> `default.__`: (Optional) Enter the menu group parent item name to configure static main menu items. If no `default.__` has a `parent` value set, this field is not needed.
-<2> `icon`: Enter the menu icon. Required for parent menu items.
-<3> `title`: Enter the menu group title. Required for parent menu items.
-<4> `priority`: (Optional) Enter the order of this menu item within its menu level.
-<5> `default.__`: Enter the menu item name for which you want to override the default value. Add the `default.` prefix to identify a main menu item.
-<6> `parent`: (Optional) Enter the parent menu item for this item. Required if is specified as the child of any menu items.
-<7> `icon`: (Optional) Enter the menu icon. To use the default icon, set the icon as an (`" "`) empty string.
-<8> `title`: (Optional) Enter the menu group title. Only required for adding a new custom main menu item. To hide a default main menu item title from the sidebar, set the title as an (`" "`) empty string.
-// Update <8> for release 1.6 as this option (currently a workaround) would be added as a functionality. RHIDP-6333.
-<9> `to`: (Optional) Enter the path that the menu item navigates to. If it is not set, it defaults to the home page.
-<10> `priority`: (Optional) Enter the order of this menu item within its menu level.
-
-+
-.Example `mainItems` configuration
-[source,yaml]
-----
-default.main-menu-items:
- menuItems:
- default.catalog:
- icon: category # <1>
- title: My Catalog
- priority: 5
- default.learning-path:
- title: '' # <2> to hide the learning path from default sidebar
- default.parentlist: # <3>
- title: Overview
- icon: bookmarks
- default.home:
- parent: default.parentlist # <4>
- default.references:
- title: References # <5>
- icon: school # <6>
- to: /references # <7>
-----
-<1> `icon`: Specifies if you want to change the icon default menu item for the catalog.
-<2> `title`: Specifies an empty string `" "` to hide the learning path from the default sidebar.
-<3> `default.parentlist`: Introduces the parent menu item.
-<4> `parent`: Nests home menu under the `default.parentlist` parent menu item.
-<5> `title`: Specifies a name for `default.references`
-<6> `icon`: Displays the `school` icon.
-<7> `to`: Redirects `default.references` to the `/references` page.
\ No newline at end of file
+ enabled: true
+----
\ No newline at end of file
diff --git a/modules/customizing-the-appearance/proc-customizing-entity-detail-tab-layout.adoc b/modules/customizing-the-appearance/proc-customizing-entity-detail-tab-layout.adoc
new file mode 100644
index 0000000000..16ada838b6
--- /dev/null
+++ b/modules/customizing-the-appearance/proc-customizing-entity-detail-tab-layout.adoc
@@ -0,0 +1,50 @@
+[id="configuring-entity-detail-tab-layout_{context}"]
+= Configuring entity detail tab layout
+
+Each {product} entity detail tab has a default opinionated layout.
+For consistency with your organization's needs, you can change the entity detail tab content when the plugin that contributes the tab content allows a configuration.
+
+.Prerequisites
+
+* The plugin that contributes the tab content allows a configuration, such as https://github.com/redhat-developer/rhdh/blob/release-{product-version}/dynamic-plugins.default.yaml[{product-short} plugins defining a default configuration in a `config` section].
+
+.Procedure
+
+* Copy the plugin default configuration into your `{my-app-config-file}` file, and change the `layout` properties.
++
+[source,yaml,subs="+quotes"]
+----
+global:
+ dynamic:
+ plugins:
+ - package: __
+ disabled: false
+ pluginConfig:
+ dynamicPlugins:
+ frontend:
+ __:
+ mountPoints:
+ - mountPoint: __
+ importName: __
+ config:
+ layout:
+ gridColumn:
+ lg: span 6
+ xs: span 12
+----
+`package`::
+Enter your package location, such as `./dynamic-plugins/dist/backstage-community-plugin-tekton`.
+
+`__`::
+Enter your plugin name, such as: `backstage-community.plugin-tekton`.
+
+`mountPoint`::
+Copy the mount point defined in the plugin default configuration, such as: `entity.page.ci/cards`.
+
+`importName`::
+Copy the import name defined in the plugin default configuration, such as: `TektonCI`.
+
+`layout`:: Enter your layout configuration.
+The tab content is displayed in a responsive 12-column grid that supports different breakpoints (`xs`,
+`sm`, `md`, `lg`, `xl`), which you can specify for a CSS property, such as `gridColumn`.
+The example uses 6 of the 12 columns to show two Tekton CI cards side-by-side on large (`lg`) screens (`span 6` columns), and to stack them one below the other on smaller screens (`xs` and up, `span 12` columns).
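+
+For example, assuming the Tekton example values given above (the `./dynamic-plugins/dist/backstage-community-plugin-tekton` package, the `backstage-community.plugin-tekton` plugin name, the `entity.page.ci/cards` mount point, and the `TektonCI` import name), a filled-in configuration might look as follows:
+
+[source,yaml]
+----
+global:
+  dynamic:
+    plugins:
+      - package: ./dynamic-plugins/dist/backstage-community-plugin-tekton
+        disabled: false
+        pluginConfig:
+          dynamicPlugins:
+            frontend:
+              backstage-community.plugin-tekton:
+                mountPoints:
+                  - mountPoint: entity.page.ci/cards
+                    importName: TektonCI
+                    config:
+                      layout:
+                        gridColumn:
+                          lg: span 6   # two cards side-by-side on large screens
+                          xs: span 12  # one card per row on smaller screens
+----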
diff --git a/modules/customizing-the-appearance/proc-customizing-entity-tab-titles.adoc b/modules/customizing-the-appearance/proc-customizing-entity-tab-titles.adoc
new file mode 100644
index 0000000000..85952723a7
--- /dev/null
+++ b/modules/customizing-the-appearance/proc-customizing-entity-tab-titles.adoc
@@ -0,0 +1,42 @@
+[id="configuring-entity-tab-titles_{context}"]
+= Configuring entity tab titles
+
+{product} provides a default opinionated tab set for catalog entity views.
+For consistency with your organization's needs, you can rename, reorder, remove, and add tab titles.
+
+.Procedure
+* For each tab to modify, enter your desired values in the `entityTabs` section in your `{my-app-config-file}` file:
++
+[source,yaml,subs="+quotes"]
+----
+upstream:
+ backstage:
+ appConfig:
+ dynamicPlugins:
+ frontend:
+ __:
+ entityTabs:
+ - mountPoint: __
+ path: __
+ title: __
+ priority: __
+----
+
+`__`::
+Enter the plugin name, such as `backstage-community.plugin-topology`.
+
+`mountPoint`::
+Enter the tab mountpoint, such as `entity.page.topology`.
+
+`path`::
+Enter the tab path, such as `/topology`.
+
+`title`::
+Enter the tab title, such as `Topology`.
+
+`priority`::
+Optional.
++
+To reorder tabs, enter the tab priority, such as `42`.
+Higher priority appears first.
++
+To remove a tab, enter a negative value, such as `-1`.
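+
+For example, assuming the Topology example values given above, a filled-in `entityTabs` entry might look as follows:
+
+[source,yaml]
+----
+upstream:
+  backstage:
+    appConfig:
+      dynamicPlugins:
+        frontend:
+          backstage-community.plugin-topology:
+            entityTabs:
+              - mountPoint: entity.page.topology
+                path: /topology
+                title: Topology
+                priority: 42   # use a negative value, such as -1, to remove the tab
+----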
diff --git a/modules/customizing-the-appearance/proc-modifying-or-adding-rhdh-custom-menuitem.adoc b/modules/customizing-the-appearance/proc-modifying-or-adding-rhdh-custom-menuitem.adoc
new file mode 100644
index 0000000000..782d9ae98a
--- /dev/null
+++ b/modules/customizing-the-appearance/proc-modifying-or-adding-rhdh-custom-menuitem.adoc
@@ -0,0 +1,70 @@
+[id='proc-modifying-or-adding-rhdh-custom-menuitem_{context}']
+= Modifying or adding a custom menu item for your {product-short} instance
+
+Modify a main menu item or add a custom menu item using the following step:
+
+.Procedure
+* In the `{my-app-config-file}` file, add a section to the `default.main-menu-items` > `menuItems` section. Use the `default.` prefix to identify the key as a main menu item.
++
+[source,yaml]
+----
+dynamicPlugins:
+ frontend:
+ default.main-menu-items:
+ menuItems:
+ default.__: # <1>
+ icon: # home | group | category | extension | school | __ # <2>
+ title: __ # <3>
+ priority: 10 # <4>
+ default.__: # <5>
+ parent: __ # <6>
+ icon: # home | group | category | extension | school | __ # <7>
+ title: __ # <8>
+ to: __ # <9>
+ priority: 100 # <10>
+ enabled: true # <11>
+----
+<1> `default.__`: (Optional) Enter the menu group parent item name to configure static main menu items. If no `default.__` has a `parent` value set, this field is not needed.
+<2> `icon`: Enter the menu icon. Required for parent menu items.
+<3> `title`: Enter the menu group title. Required for parent menu items.
+<4> `priority`: (Optional) Enter the order of this menu item within its menu level.
+<5> `default.__`: Enter the menu item name for which you want to override the default value. Add the `default.` prefix to identify a main menu item.
+<6> `parent`: (Optional) Enter the parent menu item for this item. Required if this item is specified as the child of any menu item.
+<7> `icon`: (Optional) Enter the menu icon. To use the default icon, set the icon to an empty string (`" "`).
+<8> `title`: (Optional) Enter the menu group title. Only required for adding a new custom main menu item. To hide a default main menu item title from the sidebar, set the title to an empty string (`" "`).
+// Update <8> for release 1.6 as this option (currently a workaround) would be added as a functionality. RHIDP-6333.
+<9> `to`: (Optional) Enter the path that the menu item navigates to. If it is not set, it defaults to the home page.
+<10> `priority`: (Optional) Enter the order of this menu item within its menu level.
+<11> `enabled`: (Optional) To display the menu item in the sidebar, set the value to `true`. To hide the menu item from the sidebar, set the value to `false`.
+
++
+.Example `mainItems` configuration
+[source,yaml]
+----
+default.main-menu-items:
+ menuItems:
+ default.catalog:
+ icon: category # <1>
+ title: My Catalog
+ priority: 5
+ default.learning-path:
+ title: '' # <2>
+ default.parentlist: # <3>
+ title: Overview
+ icon: bookmarks
+ default.home:
+ parent: default.parentlist # <4>
+ default.references:
+ title: References # <5>
+ icon: school # <6>
+ to: /references # <7>
+ enabled: true # <8>
+----
+<1> `icon`: Specifies the icon to use if you want to change the default icon for the Catalog menu item.
+<2> `title`: Specifies an empty string `" "` to hide the learning path from the default sidebar.
+<3> `default.parentlist`: Introduces the parent menu item.
+<4> `parent`: Nests home menu under the `default.parentlist` parent menu item.
+<5> `title`: Specifies a name for `default.references`.
+<6> `icon`: Displays the `school` icon.
+<7> `to`: Redirects `default.references` to the `/references` page.
+<8> `enabled`: (Optional) To display the menu item in the sidebar, set the value to `true`. To hide the menu item from the sidebar, set the value to `false`.
\ No newline at end of file
diff --git a/modules/customizing-the-learning-paths/con-about-learning-paths.adoc b/modules/customizing-the-learning-paths/con-about-learning-paths.adoc
new file mode 100644
index 0000000000..ad93f8e923
--- /dev/null
+++ b/modules/customizing-the-learning-paths/con-about-learning-paths.adoc
@@ -0,0 +1,4 @@
+[id='con-about-learning-paths_{context}']
+= About Learning Paths
+
+You can use the Learning Paths plugin in {product} to integrate customized e-learning content into the developer workflows. By using Learning Paths, you can create a collaborative learning culture, boost productivity, and ensure that teams stay updated with relevant best practices and technologies. The overall purpose is to accelerate onboarding, address skill gaps, ensure regulatory compliance, promote best practices, and facilitate product updates.
\ No newline at end of file
diff --git a/modules/customizing-the-learning-paths/proc-customize-rhdh-learning-paths.adoc b/modules/customizing-the-learning-paths/proc-customize-rhdh-learning-paths.adoc
deleted file mode 100644
index 2ae0faf956..0000000000
--- a/modules/customizing-the-learning-paths/proc-customize-rhdh-learning-paths.adoc
+++ /dev/null
@@ -1,82 +0,0 @@
-[id='proc-customize-rhdh-learning-paths_{context}']
-= Customizing the Learning Paths in {product}
-
-In {product}, you can configure Learning Paths by passing the data into the `{my-app-config-file}` file as a proxy. The base URL must include the `/developer-hub/learning-paths` proxy.
-
-[NOTE]
-====
-Due to the use of overlapping `pathRewrites` for both the `learning-path` and `homepage` quick access proxies, you must create the `learning-paths` configuration (`^api/proxy/developer-hub/learning-paths`) before you create the `homepage` configuration (`^/api/proxy/developer-hub`).
-
-For more information about customizing the Home page in {product}, see xref:customizing-the-home-page[Customizing the Home page in {product}].
-====
-
-You can provide data to the Learning Path from the following sources:
-
-* JSON files hosted on GitHub or GitLab.
-* A dedicated service that provides the Learning Path data in JSON format using an API.
-
-== Using hosted JSON files to provide data to the Learning Paths
-
-.Prerequisites
-
-You have installed {product} by using either the Operator or Helm chart.
-For more information, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp[{installing-on-ocp-book-title}].
-
-.Procedure
-
-To access the data from the JSON files, complete the following step:
-
-* Add the following code to the `{my-app-config-file}` file:
-+
-[source,yaml]
-----
-proxy:
- endpoints:
- '/developer-hub':
- target: https://raw.githubusercontent.com/
- pathRewrite:
- '^/api/proxy/developer-hub/learning-paths': '/redhat-developer/rhdh/main/packages/app/public/learning-paths/data.json'
- '^/api/proxy/developer-hub/tech-radar': '/redhat-developer/rhdh/main/packages/app/public/tech-radar/data-default.json'
- '^/api/proxy/developer-hub': '/redhat-developer/rhdh/main/packages/app/public/homepage/data.json'
- changeOrigin: true
- secure: true
-----
-
-== Using a dedicated service to provide data to the Learning Paths
-
-When using a dedicated service, you can do the following:
-
-* Use the same service to provide the data to all configurable {product-short} pages or use a different service for each page.
-* Use the https://github.com/redhat-developer/red-hat-developer-hub-customization-provider[`red-hat-developer-hub-customization-provider`] as an example service, which provides data for both the Home and Tech Radar pages. The `red-hat-developer-hub-customization-provider` service provides the same data as default {product-short} data. You can fork the `red-hat-developer-hub-customization-provider` service repository from GitHub and modify it with your own data, if required.
-* Deploy the `red-hat-developer-hub-customization-provider` service and the {product-short} Helm chart on the same cluster.
-
-.Prerequisites
-
-* You have installed the {product} using Helm chart.
-For more information, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp[{installing-on-ocp-book-title}].
-
-.Procedure
-
-To use a dedicated service to provide the Learning Path data, complete the following steps:
-
-. Add the following code to the link:{configuring-book-url}[`{my-app-config-file}` file]:
-+
-[source,yaml]
-----
- proxy:
- endpoints:
- # Other Proxies
- '/developer-hub/learning-paths':
- target: ${LEARNING_PATH_DATA_URL}
- changeOrigin: true
- # Change to "false" in case of using self hosted cluster with a self-signed certificate
- secure: true
-----
-where the `LEARNING_PATH_DATA_URL` is defined as `pass:c[http:///learning-paths]`, for example, `pass:c[http://rhdh-customization-provider/learning-paths]`.
-+
-[NOTE]
-====
-You can define the `LEARNING_PATH_DATA_URL` by adding it to `rhdh-secrets` or by directly replacing it with its value in your custom ConfigMap.
-====
-+
-. Delete the {product-short} pod to ensure that the new configurations are loaded correctly.
diff --git a/modules/customizing-the-learning-paths/proc-customizing-the-learning-paths-by-using-a-dedicated-service.adoc b/modules/customizing-the-learning-paths/proc-customizing-the-learning-paths-by-using-a-dedicated-service.adoc
new file mode 100644
index 0000000000..7e07c05fe7
--- /dev/null
+++ b/modules/customizing-the-learning-paths/proc-customizing-the-learning-paths-by-using-a-dedicated-service.adoc
@@ -0,0 +1,23 @@
+[id='proc-customizing-the-learning-paths-by-using-a-customization-service_{context}']
+= Customizing the Learning Paths by using a customization service
+
+For advanced scenarios, you can host your {product} customization service to provide data to all configurable {product-short} pages, such as the Learning Paths.
+You can even use a different service for each page.
+
+.Procedure
+. Deploy your {product-short} customization service on the same {ocp-short} cluster as your {product-short} instance.
+You can find an example at link:https://github.com/redhat-developer/red-hat-developer-hub-customization-provider[`red-hat-developer-hub-customization-provider`], which provides the same data as the default {product-short} data.
+The customization service provides a Learning Paths data URL such as: `pass:c,a,q[http://__/learning-paths]`.
+
+. Configure the {product-short} proxy to use your dedicated service to provide the Learning Path data by adding the following to the link:{configuring-book-url}[`{my-app-config-file}` file]:
++
+[source,yaml,subs='+quotes']
+----
+proxy:
+ endpoints:
+ '/developer-hub/learning-paths':
+ target: __
+ changeOrigin: true
+ secure: true # <1>
+----
+<1> Change to `false` if you are using a self-hosted cluster with a self-signed certificate.
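+
+For example, assuming the example customization service is reachable inside the cluster at `http://rhdh-customization-provider`, the proxy configuration might look as follows:
+
+[source,yaml]
+----
+proxy:
+  endpoints:
+    '/developer-hub/learning-paths':
+      target: http://rhdh-customization-provider/learning-paths
+      changeOrigin: true
+      secure: true   # set to false for a self-hosted cluster with a self-signed certificate
+----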
diff --git a/modules/customizing-the-learning-paths/proc-customizing-the-learning-paths-by-using-hosted-json-files.adoc b/modules/customizing-the-learning-paths/proc-customizing-the-learning-paths-by-using-hosted-json-files.adoc
new file mode 100644
index 0000000000..8fdc190bdb
--- /dev/null
+++ b/modules/customizing-the-learning-paths/proc-customizing-the-learning-paths-by-using-hosted-json-files.adoc
@@ -0,0 +1,49 @@
+[id='proc-customizing-the-learning-paths-by-using-a-hosted-json-file_{context}']
+= Customizing the Learning Paths by using a hosted JSON file
+
+For ease of use and simplicity, you can configure the Learning Paths by using a hosted JSON file.
+
+.Procedure
+. Publish the JSON file containing your Learning Paths data to a web server, such as GitHub or GitLab. You can find an example at link:https://raw.githubusercontent.com/redhat-developer/rhdh/release-{product-version}/packages/app/public/learning-paths/data.json[].
+
+. Configure the {product-short} proxy to access the Learning Paths data from the hosted JSON file by adding the following to the `{my-app-config-file}` file:
++
+[source,yaml,subs='+quotes']
+----
+proxy:
+ endpoints:
+ '/developer-hub':
+ target: __
+ pathRewrite:
+ '^/api/proxy/developer-hub/learning-paths': '__'
+ changeOrigin: true
+ secure: true
+----
+
+`__`:: Enter the hosted JSON file base URL, such as `https://raw.githubusercontent.com`.
+
+`__`:: Enter the hosted JSON file path without the base URL, such as
+`'/redhat-developer/rhdh/main/packages/app/public/learning-paths/data.json'`
++
+[TIP]
+====
+When also configuring the home page, due to the use of overlapping `pathRewrites` for both the `learning-path` and `homepage` quick access proxies, create the `learning-paths` configuration (`^/api/proxy/developer-hub/learning-paths`) before you create the `homepage` configuration (`^/api/proxy/developer-hub`).
+For example:
+
+[source,yaml]
+----
+proxy:
+ endpoints:
+ '/developer-hub':
+ target: https://raw.githubusercontent.com/
+ pathRewrite:
+ '^/api/proxy/developer-hub/learning-paths': '/redhat-developer/rhdh/main/packages/app/public/learning-paths/data.json'
+ '^/api/proxy/developer-hub/tech-radar': '/redhat-developer/rhdh/main/packages/app/public/tech-radar/data-default.json'
+ '^/api/proxy/developer-hub': '/redhat-developer/rhdh/main/packages/app/public/homepage/data.json'
+ changeOrigin: true
+ secure: true
+----
+====
+
+.Additional resources
+* xref:customizing-the-home-page[Customizing the Home page in {product}].
diff --git a/modules/customizing-the-learning-paths/proc-starting-and-completing-lessions-in-learning-paths.adoc b/modules/customizing-the-learning-paths/proc-starting-and-completing-lessions-in-learning-paths.adoc
new file mode 100644
index 0000000000..9a0dc078b3
--- /dev/null
+++ b/modules/customizing-the-learning-paths/proc-starting-and-completing-lessions-in-learning-paths.adoc
@@ -0,0 +1,20 @@
+[id='proc-starting-and-completing-lessons-in-learning-paths_{context}']
+= Starting and completing lessons in Learning Paths
+
+As a developer, you can start a course and complete the lessons at your own pace.
+
+.Prerequisites
+* You can log in to link:https://developers.redhat.com[developers.redhat.com].
+* Your platform engineer has granted you access to the Learning Paths plugin.
+
+.Procedure
+To start a course in Learning Paths, complete the following steps:
+
+. In your {product} navigation menu, click *Learning Paths*.
+. Select the tile for the course you would like to begin.
++
+[NOTE]
+====
+This action redirects you to the main page of the course in link:https://developers.redhat.com[the Red Hat Developers site].
+====
\ No newline at end of file
diff --git a/modules/customizing-the-tech-radar-page/proc-customize-rhdh-tech-radar-page.adoc b/modules/customizing-the-tech-radar-page/proc-customize-rhdh-tech-radar-page.adoc
deleted file mode 100644
index 773032d016..0000000000
--- a/modules/customizing-the-tech-radar-page/proc-customize-rhdh-tech-radar-page.adoc
+++ /dev/null
@@ -1,74 +0,0 @@
-[id='proc-customize-rhdh-tech-radar-page_{context}']
-= Customizing the Tech Radar page in {product}
-
-In {product}, the Tech Radar page is provided by the `tech-radar` dynamic plugin, which is disabled by default. For information about enabling dynamic plugins in {product} see link:{configuring-dynamic-plugins-book-url}[{configuring-dynamic-plugins-book-title}].
-
-In {product}, you can configure Learning Paths by passing the data into the `{my-app-config-file}` file as a proxy. The base Tech Radar URL must include the `/developer-hub/tech-radar` proxy.
-
-[NOTE]
-====
-Due to the use of overlapping `pathRewrites` for both the `tech-radar` and `homepage` quick access proxies, you must create the `tech-radar` configuration (`^api/proxy/developer-hub/tech-radar`) before you create the `homepage` configuration (`^/api/proxy/developer-hub`).
-
-For more information about customizing the Home page in {product}, see xref:customizing-the-home-page[Customizing the Home page in {product}].
-====
-
-You can provide data to the Tech Radar page from the following sources:
-
-* JSON files hosted on GitHub or GitLab.
-* A dedicated service that provides the Tech Radar data in JSON format using an API.
-
-== Using hosted JSON files to provide data to the Tech Radar page
-
-.Prerequisites
-
-* You have installed {product} by using either the Operator or Helm chart. For more information, see link:{installing-on-ocp-book-url}[{installing-on-ocp-book-title}].
-* You have specified the data sources for the Tech Radar plugin in the `integrations` section of the `{my-app-config-file}` file. For example, to configure GitHub as an integration, see link:{authentication-book-url}#authenticating-with-github[Authenticating with GitHub].
-
-.Procedure
-
-To access the data from the JSON files, complete the following step:
-
-. Enable the `./dynamic-plugins/dist/backstage-community-plugin-tech-radar` and `/dynamic-plugins/dist/backstage-community-plugin-tech-radar-backend-dynamic` plugins.
-. Add the following code to the `{my-app-config-file}` file:
-+
-[source,yaml]
-----
-techRadar:
- url: ${TECH_RADAR_DATA_URL} <1>
-----
-<1> `TECH_RADAR_DATA_URL` is the URL from which the JSON data is loaded.
-
-== Using a dedicated service to provide data to the Tech Radar page
-
-When using a dedicated service, you can do the following:
-
-* Use the same service to provide the data to all configurable {product-short} pages or use a different service for each page.
-* Use the https://github.com/redhat-developer/red-hat-developer-hub-customization-provider[`red-hat-developer-hub-customization-provider`] as an example service, which provides data for both the Home and Tech Radar pages. The `red-hat-developer-hub-customization-provider` service provides the same data as default {product-short} data. You can fork the `red-hat-developer-hub-customization-provider` service repository from GitHub and modify it with your own data, if required.
-* Deploy the `red-hat-developer-hub-customization-provider` service and the {product-short} Helm chart on the same cluster.
-
-.Prerequisites
-
-* You have installed the {product} using Helm chart.
-For more information, see xref:{installing-on-ocp-book-url}#assembly-install-rhdh-ocp-helm[{installing-on-ocp-book-title} with the Helm chart].
-
-.Procedure
-
-To use a separate service to provide the Tech Radar data, complete the following steps:
-
-. Add the dedicated service as an allowed host by adding the following code to the `{my-app-config-file}` file:
-+
-[source,yaml]
-----
-backend:
- reading:
- allow:
- - host: 'hostname'
-----
-. Add the following to the `{my-app-config-file}` file:
-+
-[source,yaml]
-----
-techRadar:
- url: ${TECH_RADAR_DATA_URL} <1>
-----
-<1> `TECH_RADAR_DATA_URL` is the URL from which the JSON data is loaded.
diff --git a/modules/customizing-the-tech-radar-page/proc-customizing-the-tech-radar-page-by-using-a-customization-service.adoc b/modules/customizing-the-tech-radar-page/proc-customizing-the-tech-radar-page-by-using-a-customization-service.adoc
new file mode 100644
index 0000000000..bf1498043e
--- /dev/null
+++ b/modules/customizing-the-tech-radar-page/proc-customizing-the-tech-radar-page-by-using-a-customization-service.adoc
@@ -0,0 +1,37 @@
+[id='proc-customizing-rhdh-tech-radar-page-by-using-a-customization-service_{context}']
+= Customizing the Tech Radar page by using a customization service
+
+For advanced scenarios, you can host your {product} customization service to provide data to all configurable {product-short} pages, such as the Tech Radar page.
+You can even use a different service for each page.
+
+.Prerequisites
+* You have specified the data sources for the Tech Radar plugin in the `integrations` section of the `{my-app-config-file}` file.
+For example, to configure GitHub as an integration, see link:{authentication-book-url}#authenticating-with-github[Authenticating with GitHub].
+
+* You have enabled the `./dynamic-plugins/dist/backstage-community-plugin-tech-radar` and `./dynamic-plugins/dist/backstage-community-plugin-tech-radar-backend-dynamic` plugins.
+
+.Procedure
+. Deploy your {product-short} customization service on the same {ocp-short} cluster as your {product-short} instance.
+You can find an example at link:https://github.com/redhat-developer/red-hat-developer-hub-customization-provider[`red-hat-developer-hub-customization-provider`], which provides the same data as the default {product-short} data.
+The customization service provides a Tech Radar data URL such as: `pass:c,a,q[http://__/tech-radar]`.
+
+. Add the dedicated service as an allowed host by adding the following code to the `{my-app-config-file}` file:
++
+[source,yaml,subs='+quotes']
+----
+backend:
+ reading:
+ allow:
+ - host: '__'
+----
+`__`:: Enter the host of your Tech Radar data URL, such as: `pass:c,a,q[__]`.
+
+. Add the following to the `{my-app-config-file}` file:
++
+[source,yaml,subs='+quotes']
+----
+techRadar:
+ url: __
+----
+
+`__`:: Enter your Tech Radar data URL, such as: `pass:c,a,q[http://__/tech-radar]`.
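+
+For example, assuming the example customization service is reachable inside the cluster at a host named `rhdh-customization-provider`, the combined configuration might look as follows:
+
+[source,yaml]
+----
+backend:
+  reading:
+    allow:
+      - host: 'rhdh-customization-provider'   # allow reading from the customization service host
+techRadar:
+  url: http://rhdh-customization-provider/tech-radar
+----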
diff --git a/modules/customizing-the-tech-radar-page/proc-customizing-the-tech-radar-page-by-using-a-json-file.adoc b/modules/customizing-the-tech-radar-page/proc-customizing-the-tech-radar-page-by-using-a-json-file.adoc
new file mode 100644
index 0000000000..697d3111b5
--- /dev/null
+++ b/modules/customizing-the-tech-radar-page/proc-customizing-the-tech-radar-page-by-using-a-json-file.adoc
@@ -0,0 +1,24 @@
+[id='proc-customizing-the-tech-radar-page-by-using-a-json-file_{context}']
+= Customizing the Tech Radar page by using a JSON file
+
+For ease of use and simplicity, you can configure the Tech Radar page by using a hosted JSON file.
+
+.Prerequisites
+
+* You have specified the data sources for the Tech Radar plugin in the `integrations` section of the `{my-app-config-file}` file. For example, to configure GitHub as an integration, see link:{authentication-book-url}#authenticating-with-github[Authenticating with GitHub].
+
+* You have enabled the `./dynamic-plugins/dist/backstage-community-plugin-tech-radar` and `./dynamic-plugins/dist/backstage-community-plugin-tech-radar-backend-dynamic` plugins.
+
+.Procedure
+
+. Publish the JSON file containing your Tech Radar data to a web server, such as GitHub or GitLab. You can find an example at link:https://raw.githubusercontent.com/redhat-developer/rhdh/release-{product-version}/packages/app/public/tech-radar/data-default.json[].
+
+. Configure {product-short} to access the Tech Radar data from the hosted JSON file by adding the following to the `{my-app-config-file}` file:
++
+[source,yaml,subs='+quotes']
+----
+techRadar:
+ url: __
+----
+
+`__`:: Enter the Tech Radar data hosted JSON URL.
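+
+For example, a configuration that points at the sample JSON file mentioned above might look as follows:
+
+[source,yaml,subs="+attributes"]
+----
+techRadar:
+  url: https://raw.githubusercontent.com/redhat-developer/rhdh/release-{product-version}/packages/app/public/tech-radar/data-default.json
+----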
diff --git a/modules/customizing/proc-customizing-rhdh-metadata-card.adoc b/modules/customizing/proc-customizing-rhdh-metadata-card.adoc
new file mode 100644
index 0000000000..9cb40baf1d
--- /dev/null
+++ b/modules/customizing/proc-customizing-rhdh-metadata-card.adoc
@@ -0,0 +1,35 @@
+:_mod-docs-content-type: PROCEDURE
+[id="proc-customizing-rhdh-metadata-card_{context}"]
+= Customizing the {product-very-short} Metadata card on the Settings page
+
+The *Settings* page in {product} contains a *{product-very-short} Metadata* card. By default, the *{product-very-short} Metadata* card shows the *{product-very-short} Version* and *Backstage Version* of your {product} instance. When you click the *Show more* icon, the card expands to also show *Upstream*, *Midstream*, and *Build Time* information.
+
+You can override the default to show custom build information about your {product} instance in the card. You can customize the card title as well as the card contents.
+
+.Procedure
+
+To customize the *{product-very-short} Metadata* card, complete the following step:
+
+* In your `{my-app-config-file}` file, configure the `buildInfo` field. For example:
++
+[source,yaml,subs="+attributes,+quotes"]
+----
+buildInfo:
+ title: __
+ card:
+ TechDocs builder: '__'
+ Authentication provider: '__'
+ RBAC: disabled
+ full: true
+
+----
++
+where
+
+__ :: Specifies the title that you want to display on the customized card.
+__ :: Specifies whether to generate and publish the docs or to only fetch the docs when using the default build strategy. Possible values are `local` or `external`. If you want to generate and publish the docs, set the `techdocs.builder` field to `local` in your `{my-app-config-file}` file. If you only want to fetch the docs without generating and publishing them, set the `techdocs.builder` field to `external`.
+__ :: Specifies the authentication provider that you want to use. Example values are `GitHub` or `GitLab`.
+`full` :: Specifies what information is shown on the customized card. Possible values are `true` or `false`. If set to `true`, only the information specified in this configuration is shown on the card. If set to `false`, the specified information is shown on the card along with the build versions. The default value is `true`.
+
+.Result
+The *Settings* page displays a card with a custom title and custom build information about your {product} instance.
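+
+For example, a filled-in configuration might look as follows. The title and card entries shown are illustrative values; replace them with the build information for your instance:
+
+[source,yaml]
+----
+buildInfo:
+  title: RHDH Build Information   # example title
+  card:
+    TechDocs builder: 'local'
+    Authentication provider: 'GitHub'
+    RBAC: disabled
+  full: true
+----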
diff --git a/modules/dynamic-plugins/con-dynamic-plugins-cache.adoc b/modules/dynamic-plugins/con-dynamic-plugins-cache.adoc
index c4b80167c5..0dbf8a5fc5 100644
--- a/modules/dynamic-plugins/con-dynamic-plugins-cache.adoc
+++ b/modules/dynamic-plugins/con-dynamic-plugins-cache.adoc
@@ -10,153 +10,5 @@ When you enable dynamic plugins cache:
* During boot, if a plugin's package reference matches the previous installation and the checksum is unchanged, the download is skipped.
* Plugins that are disabled since the previous boot are automatically removed.
-== Enabling the dynamic plugins cache
-To enable the dynamic plugins cache in {product-very-short}, the plugins directory `dynamic-plugins-root` must be a persistent volume.
-
-=== Creating a PVC for the dynamic plugin cache by using the Operator
-
-For operator-based installations, you must manually create the persistent volume claim (PVC) by replacing the default `dynamic-plugins-root` volume with a PVC named `dynamic-plugins-root`.
-
-.Procedure
-. Create the persistent volume definition and save it to a file, such as `pvc.yaml`. For example:
-+
-[source,yaml]
-----
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
- name: dynamic-plugins-root
-spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 5Gi
-
-----
-+
-[NOTE]
-====
-This example uses `ReadWriteOnce` as the access mode which prevents multiple replicas from sharing the PVC across different nodes.
-To run multiple replicas on different nodes, depending on your storage driver, you must use an access mode such as `ReadWriteMany`.
-====
-. To apply this PVC to your cluster, run the following command:
-+
-[source,terminal]
-----
-oc apply -f pvc.yaml
-----
-. Replace the default `dynamic-plugins-root` volume with a PVC named `dynamic-plugins-root`. For example:
-+
-[source,yaml]
-----
-apiVersion: rhdh.redhat.com/v1alpha3
-kind: Backstage
-metadata:
- name: developer-hub
-spec:
- deployment:
- patch:
- spec:
- template:
- spec:
- volumes:
- - $patch: replace
- name: dynamic-plugins-root
- persistentVolumeClaim:
- claimName: dynamic-plugins-root
-----
-+
-[NOTE]
-To avoid adding a new volume, you must use the `$patch: replace` directive.
-
-=== Creating a PVC for the dynamic plugin cache using the Helm Chart
-For Helm chart installations, if you require the dynamic plugin cache to persist across pod restarts, you must create a persistent volume claim (PVC) and configure the Helm chart to use it.
-
-.Procedure
-. Create the persistent volume definition. For example:
-+
-[source,yaml]
-----
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
- name: dynamic-plugins-root
-spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 5Gi
-----
-+
[NOTE]
-====
-This example uses `ReadWriteOnce` as the access mode which prevents multiple replicas from sharing the PVC across different nodes.
-To run multiple replicas on different nodes, depending on your storage driver, you must use an access mode such as `ReadWriteMany`.
-====
-
-. To apply this PVC to your cluster, run the following command:
-+
-[source,terminal]
-----
-oc apply -f pvc.yaml
-----
-. Configure the Helm chart to use the PVC. For example:
-+
-[source,yaml]
-----
-upstream:
- backstage:
- extraVolumes:
- - name: dynamic-plugins-root
- persistentVolumeClaim:
- claimName: dynamic-plugins-root
- - name: dynamic-plugins
- configMap:
- defaultMode: 420
- name: '{{ printf "%s-dynamic-plugins" .Release.Name }}'
- optional: true
- - name: dynamic-plugins-npmrc
- secret:
- defaultMode: 420
- optional: true
- secretName: '{{ printf "%s-dynamic-plugins-npmrc" .Release.Name }}'
- - name: dynamic-plugins-registry-auth
- secret:
- defaultMode: 416
- optional: true
- secretName: '{{ printf "%s-dynamic-plugins-registry-auth" .Release.Name }}'
- - name: npmcacache
- emptyDir: {}
- - name: temp
- emptyDir: {}
-----
-+
-[NOTE]
-====
-When you configure the Helm chart to use the PVC, you must also include the link:https://github.com/redhat-developer/rhdh-chart/blob/release-{product-version}/charts/backstage/values.yaml#L145-L181[`extraVolumes`] defined in the default Helm chart.
-====
-
-== Configuring the dynamic plugins cache
-You can set the following optional dynamic plugin cache parameters in your `dynamic-plugins.yaml` file:
-
-* `forceDownload`: Set the value to `true` to force a reinstall of the plugin, bypassing the cache. The default value is `false`.
-
-* `pullPolicy`: Similar to the `forceDownload` parameter and is consistent with other image container platforms. You can use one of the following values for this key:
-
-** `Always`: This value compares the image digest in the remote registry and downloads the artifact if it has changed, even if the plugin was previously downloaded.
-** `IfNotPresent`: This value downloads the artifact if it is not already present in the dynamic-plugins-root folder, without checking image digests.
-+
-[NOTE]
-The `pullPolicy` setting is also applied to the NPM downloading method, although `Always` will download the remote artifact without a digest check. The existing `forceDownload` option remains functional, however, the `pullPolicy` option takes precedence. The `forceDownload` option may be deprecated in a future {product-short} release.
-
-.Example `dynamic-plugins.yaml` file configuration to download the remote artifact without a digest check:
-
-[source,yaml]
-----
-plugins:
- - disabled: false
- pullPolicy: Always
- package: 'oci://quay.io/example-org/example-plugin:v1.0.0!internal-backstage-plugin-example'
-----
+To enable the dynamic plugins cache in {product-very-short}, the plugins directory `dynamic-plugins-root` must be a persistent volume.
\ No newline at end of file
diff --git a/modules/dynamic-plugins/con-overriding-core-backend-services.adoc b/modules/dynamic-plugins/con-overriding-core-backend-services.adoc
deleted file mode 100644
index 233c801d28..0000000000
--- a/modules/dynamic-plugins/con-overriding-core-backend-services.adoc
+++ /dev/null
@@ -1,101 +0,0 @@
-[id="overriding-core-backend-services_{context}"]
-= Overriding Core Backend Service Configuration
-
-The {product} ({product-very-short}) backend platform consists of a number of core services that are well encapsulated. The {product-very-short} backend installs these default core services statically during initialization.
-
-You can configure these core services by customizing the backend source code and rebuilding your {product-short} application. Alternatively, you can customize a core service by installing it as a `BackendFeature` by using dynamic plugin functionality.
-
-To use the dynamic plugin functionality to customize a core service in your RHDH application, you must configure the backend to avoid statically installing a given default core service.
-
-For example, adding a middleware function to handle all incoming requests can be done by installing a custom `configure` function for the root `HTTP` router backend service which allows access to the underlying Express application.
-
-.Example of a `BackendFeature` middleware function to handle incoming `HTTP` requests
-
-[source,javascript]
-----
-// Create the BackendFeature
-export const customRootHttpServerFactory: BackendFeature =
- rootHttpRouterServiceFactory({
- configure: ({ app, routes, middleware, logger }) => {
- logger.info(
- 'Using custom root HttpRouterServiceFactory configure function',
- );
- app.use(middleware.helmet());
- app.use(middleware.cors());
- app.use(middleware.compression());
- app.use(middleware.logging());
- // Add a the custom middleware function before all
- // of the route handlers
- app.use(addTestHeaderMiddleware({ logger }));
- app.use(routes);
- app.use(middleware.notFound());
- app.use(middleware.error());
- },
- });
-
-// Export the BackendFeature as the default entrypoint
-export default customRootHttpServerFactory;
-----
-
-In the above example, as the `BackendFeature` overrides the default implementation of the HTTP router service, you must set the `ENABLE_CORE_ROOTHTTPROUTER_OVERRIDE` environment variable to `true` so that the {product-short} does not install the default implementation automatically.
-
-== Overriding environment variables
-To allow a dynamic plugin to load a core service override, you must start the {product-short} backend with the corresponding core service ID environment variable set to `true`.
-
-.Environment variables and core service IDs
-[cols="50%,50%", frame="all", options="header"]
-|===
-|Variable
-|Description
-
-|`ENABLE_CORE_AUTH_OVERRIDE`
-|Override the `core.auth` service
-
-| `ENABLE_CORE_CACHE_OVERRIDE`
-| Override the `core.cache` service
-
-| `ENABLE_CORE_ROOTCONFIG_OVERRIDE`
-| Override the `core.rootConfig` service
-
-| `ENABLE_CORE_DATABASE_OVERRIDE`
-| Override the `core.database` service
-
-| `ENABLE_CORE_DISCOVERY_OVERRIDE`
-| Override the `core.discovery` service
-
-| `ENABLE_CORE_HTTPAUTH_OVERRIDE`
-| Override the `core.httpAuth` service
-
-| `ENABLE_CORE_HTTPROUTER_OVERRIDE`
-| Override the `core.httpRouter` service
-
-| `ENABLE_CORE_LIFECYCLE_OVERRIDE`
-| Override the `core.lifecycle` service
-
-| `ENABLE_CORE_LOGGER_OVERRIDE`
-| Override the `core.logger` service
-
-| `ENABLE_CORE_PERMISSIONS_OVERRIDE`
-| Override the `core.permissions` service
-
-| `ENABLE_CORE_ROOTHEALTH_OVERRIDE`
-| Override the `core.rootHealth` service
-
-| `ENABLE_CORE_ROOTHTTPROUTER_OVERRIDE`
-| Override the `core.rootHttpRouter` service
-
-| `ENABLE_CORE_ROOTLIFECYCLE_OVERRIDE`
-| Override the `core.rootLifecycle` service
-
-| `ENABLE_CORE_SCHEDULER_OVERRIDE`
-| Override the `core.scheduler` service
-
-| `ENABLE_CORE_USERINFO_OVERRIDE`
-| Override the `core.userInfo` service
-
-| `ENABLE_CORE_URLREADER_OVERRIDE`
-| Override the `core.urlReader` service
-
-| `ENABLE_EVENTS_SERVICE_OVERRIDE`
-| Override the `events.service` service
-|===
\ No newline at end of file
diff --git a/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.adoc b/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.adoc
index e26dad13aa..b7e0965a49 100644
--- a/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.adoc
+++ b/modules/dynamic-plugins/con-preinstalled-dynamic-plugins.adoc
@@ -14,7 +14,11 @@ The following preinstalled dynamic plugins are enabled by default:
* `@backstage/plugin-techdocs-module-addons-contrib`
* `@backstage/plugin-techdocs`
* `@red-hat-developer-hub/backstage-plugin-dynamic-home-page`
+* `@red-hat-developer-hub/backstage-plugin-global-floating-action-button`
* `@red-hat-developer-hub/backstage-plugin-global-header`
+* `@red-hat-developer-hub/backstage-plugin-catalog-backend-module-marketplace`
+* `@red-hat-developer-hub/backstage-plugin-marketplace-backend`
+* `@red-hat-developer-hub/backstage-plugin-marketplace`
The dynamic plugins that require custom configuration are disabled by default.
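+
+For example, to enable a plugin that is disabled by default, set `disabled: false` for its package in your `dynamic-plugins.yaml` file. A minimal sketch, using the 3scale backend plugin path listed later in this document:
+
+[source,yaml]
+----
+plugins:
+  - package: ./dynamic-plugins/dist/backstage-community-plugin-3scale-backend-dynamic
+    disabled: false
+----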
diff --git a/modules/dynamic-plugins/proc-app-grouping.adoc b/modules/dynamic-plugins/proc-app-grouping.adoc
new file mode 100644
index 0000000000..89d7bccb05
--- /dev/null
+++ b/modules/dynamic-plugins/proc-app-grouping.adoc
@@ -0,0 +1,11 @@
+[id="proc-app-grouping"]
+
+= App grouping
+
+To display workload resources such as deployments or pods in a visual group, add the following label:
+
+[source,yaml]
+----
+labels:
+ app.kubernetes.io/part-of:
+----
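+
+For example, the following hypothetical `Deployment` metadata places the workload in a visual group named `my-app` (the names are illustrative only):
+
+[source,yaml]
+----
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: backend
+  labels:
+    app.kubernetes.io/part-of: my-app
+----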
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-helm.adoc b/modules/dynamic-plugins/proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-helm.adoc
new file mode 100644
index 0000000000..688642d81c
--- /dev/null
+++ b/modules/dynamic-plugins/proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-helm.adoc
@@ -0,0 +1,72 @@
+[id="proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-helm_{context}"]
+= Creating a PVC for the dynamic plugin cache using the Helm Chart
+For Helm chart installations, if you require the dynamic plugin cache to persist across pod restarts, you must create a persistent volume claim (PVC) and configure the Helm chart to use it.
+
+.Prerequisites
+* You have installed {product} using the Helm chart.
+* You have installed the {openshift-cli}.
+
+.Procedure
+. Create the persistent volume definition. For example:
++
+[source,yaml]
+----
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: dynamic-plugins-root
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+----
++
+[NOTE]
+====
+This example uses `ReadWriteOnce` as the access mode which prevents multiple replicas from sharing the PVC across different nodes.
+To run multiple replicas on different nodes, depending on your storage driver, you must use an access mode such as `ReadWriteMany`.
+====
+
+. To apply this PVC to your cluster, run the following command:
++
+[source,terminal]
+----
+oc apply -f pvc.yaml
+----
+. Configure the Helm chart to use the PVC. For example:
++
+[source,yaml]
+----
+upstream:
+ backstage:
+ extraVolumes:
+ - name: dynamic-plugins-root
+ persistentVolumeClaim:
+ claimName: dynamic-plugins-root
+ - name: dynamic-plugins
+ configMap:
+ defaultMode: 420
+ name: '{{ printf "%s-dynamic-plugins" .Release.Name }}'
+ optional: true
+ - name: dynamic-plugins-npmrc
+ secret:
+ defaultMode: 420
+ optional: true
+ secretName: '{{ printf "%s-dynamic-plugins-npmrc" .Release.Name }}'
+ - name: dynamic-plugins-registry-auth
+ secret:
+ defaultMode: 416
+ optional: true
+ secretName: '{{ printf "%s-dynamic-plugins-registry-auth" .Release.Name }}'
+ - name: npmcacache
+ emptyDir: {}
+ - name: temp
+ emptyDir: {}
+----
++
+[NOTE]
+====
+When you configure the Helm chart to use the PVC, you must also include the link:https://github.com/redhat-developer/rhdh-chart/blob/release-{product-version}/charts/backstage/values.yaml#L145-L181[`extraVolumes`] defined in the default Helm chart.
+====
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-the-operator.adoc b/modules/dynamic-plugins/proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-the-operator.adoc
new file mode 100644
index 0000000000..e0b9e7fc43
--- /dev/null
+++ b/modules/dynamic-plugins/proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-the-operator.adoc
@@ -0,0 +1,61 @@
+[id="proc-creating-a-pvc-for-the-dynamic-plugin-cache-by-using-the-operator_{context}"]
+= Creating a PVC for the dynamic plugin cache by using the Operator
+
+For operator-based installations, you must manually create the persistent volume claim (PVC) by replacing the default `dynamic-plugins-root` volume with a PVC named `dynamic-plugins-root`.
+
+.Prerequisites
+* You have installed {product} on {ocp-short} using the {product} Operator.
+* You have installed the {openshift-cli}.
+
+.Procedure
+. Create the persistent volume definition and save it to a file, such as `pvc.yaml`. For example:
++
+[source,yaml]
+----
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: dynamic-plugins-root
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+
+----
++
+[NOTE]
+====
+This example uses `ReadWriteOnce` as the access mode which prevents multiple replicas from sharing the PVC across different nodes.
+To run multiple replicas on different nodes, depending on your storage driver, you must use an access mode such as `ReadWriteMany`.
+====
+. To apply this PVC to your cluster, run the following command:
++
+[source,terminal]
+----
+oc apply -f pvc.yaml
+----
+. Replace the default `dynamic-plugins-root` volume with a PVC named `dynamic-plugins-root`. For example:
++
+[source,yaml]
+----
+apiVersion: rhdh.redhat.com/v1alpha3
+kind: Backstage
+metadata:
+ name: developer-hub
+spec:
+ deployment:
+ patch:
+ spec:
+ template:
+ spec:
+ volumes:
+ - $patch: replace
+ name: dynamic-plugins-root
+ persistentVolumeClaim:
+ claimName: dynamic-plugins-root
+----
++
+[NOTE]
+To avoid adding a new volume, you must use the `$patch: replace` directive.
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-enable-users-to-use-topology-plugin.adoc b/modules/dynamic-plugins/proc-enable-users-to-use-topology-plugin.adoc
index 608a89c920..16f26783f3 100644
--- a/modules/dynamic-plugins/proc-enable-users-to-use-topology-plugin.adoc
+++ b/modules/dynamic-plugins/proc-enable-users-to-use-topology-plugin.adoc
@@ -3,7 +3,7 @@
The Topology plugin is defining additional permissions. When link:{authorization-book-url}[{authorization-book-title}] is enabled, to enable users to use the Topology plugin, grant them:
-* The `topology.view.read` `read` permission to view the Topology panel.
+* The `kubernetes.clusters.read` and `kubernetes.resources.read` `read` permissions to view the Topology panel.
* The `kubernetes.proxy` `use` permission to view the pod logs.
* The `catalog-entity` `read` permission to view the {product} software catalog items.
@@ -16,7 +16,8 @@ The Topology plugin is defining additional permissions. When link:{authorization
[source]
----
g, user:default/, role:default/topology-viewer
-p, role:default/topology-viewer, topology.view.read, read, allow <1>
+p, role:default/topology-viewer, kubernetes.clusters.read, read, allow <1>
+p, role:default/topology-viewer, kubernetes.resources.read, read, allow <1>
p, role:default/topology-viewer, kubernetes.proxy, use, allow <2>
p, role:default/topology-viewer, catalog-entity, read, allow <3>
----
diff --git a/modules/dynamic-plugins/proc-enabling-the-source-code-editor.adoc b/modules/dynamic-plugins/proc-enabling-the-source-code-editor.adoc
new file mode 100644
index 0000000000..6b85f376c5
--- /dev/null
+++ b/modules/dynamic-plugins/proc-enabling-the-source-code-editor.adoc
@@ -0,0 +1,34 @@
+[id="proc-enabling-the-source-code-editor_{context}"]
+= Enabling the source code editor
+
+To enable the source code editor, you must grant read access to the CheClusters resource in the `ClusterRole` as shown in the following example code:
+
+[source,yaml]
+----
+ ...
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: backstage-read-only
+ rules:
+ ...
+ - apiGroups:
+ - org.eclipse.che
+ resources:
+ - checlusters
+ verbs:
+ - get
+ - list
+----
+
+To use the source code editor, you must add the following configuration to the `kubernetes.customResources` property in your `{my-app-config-file}` file:
+
+[source,yaml]
+----
+ kubernetes:
+ ...
+ customResources:
+ - group: 'org.eclipse.che'
+ apiVersion: 'v2'
+ plural: 'checlusters'
+----
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-entity-annotation-or-label.adoc b/modules/dynamic-plugins/proc-entity-annotation-or-label.adoc
new file mode 100644
index 0000000000..76776a84c6
--- /dev/null
+++ b/modules/dynamic-plugins/proc-entity-annotation-or-label.adoc
@@ -0,0 +1,24 @@
+[id="proc-entity-annotation-or-label"]
+
+= Entity annotation/label
+
+For {product-very-short} to detect that an entity has Kubernetes components, add the following annotation to the `catalog-info.yaml` file of the entity:
+
+[source,yaml]
+----
+annotations:
+ backstage.io/kubernetes-id: <entity_name>
+----
+
+Add the following label to the resources so that the Kubernetes plugin gets the Kubernetes resources from the requested entity:
+
+[source,yaml]
+----
+labels:
+ backstage.io/kubernetes-id: <entity_name>
+----
+
+[NOTE]
+====
+When using the label selector, the mentioned labels must be present on the resource.
+====
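+
+For example, a minimal `catalog-info.yaml` sketch for a hypothetical component named `my-service`; the same value is used as the annotation on the entity and as the `backstage.io/kubernetes-id` label on its Kubernetes resources:
+
+[source,yaml]
+----
+apiVersion: backstage.io/v1alpha1
+kind: Component
+metadata:
+  name: my-service
+  annotations:
+    backstage.io/kubernetes-id: my-service
+spec:
+  type: service
+  lifecycle: production
+  owner: team-a
+----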
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-extensions-installing.adoc b/modules/dynamic-plugins/proc-extensions-installing.adoc
new file mode 100644
index 0000000000..7f3ed66f1b
--- /dev/null
+++ b/modules/dynamic-plugins/proc-extensions-installing.adoc
@@ -0,0 +1,56 @@
+[id="rhdh-extensions-plugins-installing_{context}"]
+= Installing a plugin by using Extensions
+You can use *Extensions* to install a plugin and configure it by updating the `dynamic-plugins.yaml` file.
+
+.Prerequisites
+* You have the necessary permissions to modify plugin configurations and access the application environment.
+* You have identified and set the required environment variables referenced by the plugin's default configuration. These environment variables must be defined in the Helm Chart or Operator configuration.
+
+.Procedure
+. Open your {product-short} application and click *Administration* > *Extensions*.
+. Use the search bar on the *Extensions* page to find the plugin that you want to install, and then click the card. For example, search for Tekton and click *Read more* on the *Pipelines With Tekton* card.
++
+image::rhdh-plugins-reference/rhdh-extensions-tekton-card.png[Extensions catalog with Tekton card]
+. In the plugin drawer, you can review information about the plugin and how to configure it in {product-very-short}. To install the plugin, click *Install*.
++
+image::rhdh-plugins-reference/rhdh-extensions-tekton-details.png[Extensions catalog with Tekton details]
+. On the Install Plugin page, a YAML editor and installation instructions are displayed.
++
+image::rhdh-plugins-reference/rhdh-extensions-tekton-editor-1.png[Extensions catalog with plugin editor template]
+. Click the *About the plugin* tab to view installation and configuration details for the plugin.
+. Click the *Examples* tab to display the default plugin configuration.
+. Click *Apply* to copy the default plugin configuration to the YAML editor.
+. In the YAML editor, click the copy icon to copy the plugin configuration.
++
+image::rhdh-plugins-reference/rhdh-extensions-tekton-editor-2.png[Extensions catalog with Tekton configuration]
++
+[NOTE]
+In {product-very-short} {product-version}, the *Install* button is disabled, so you must copy the plugin configuration to the `dynamic-plugins.yaml` file.
+. In the `dynamic-plugins.yaml` file, add the plugin configuration that you copied in the previous step to the `plugins` definitions.
++
+[NOTE]
+If you have installed {product-very-short} by using the Helm Chart, to enable the plugin, you may need to roll out your {product-very-short} project manually.
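++
+For example, after you paste the copied configuration, the `plugins` section of your `dynamic-plugins.yaml` file might look similar to the following sketch, which uses the Tekton plugin path listed in the supported plugins section of this document:
++
+[source,yaml]
+----
+plugins:
+  - package: ./dynamic-plugins/dist/backstage-community-plugin-tekton
+    disabled: false
+----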
+
+.Verification
+. Click on *Administration* > *Extensions*.
+. Go to the *Installed* tab to view a list of installed plugins.
+. Search for the plugin that you installed to confirm that it is available and enabled.
+
+////
+. To disable the Extensions feature plugins, edit your `dynamic-plugins.yaml` file with the following content.
++
+.`dynamic-plugins.yaml` fragment
+[source,yaml]
+----
+plugins:
+ - package: ./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-marketplace
+ disabled: true
+ - package: ./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-catalog-backend-module-marketplace-dynamic
+ disabled: true
+ - package: ./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-marketplace-backend-dynamic
+ disabled: true
+----
+
+[NOTE]
+If you disable the Extensions feature plugins, the *Catalog* and *Installed* tabs will also be removed. You can still view installed plugins by clicking on *Administration* > *Extensions*.
+////
diff --git a/modules/dynamic-plugins/proc-icon-displayed-in-the-node.adoc b/modules/dynamic-plugins/proc-icon-displayed-in-the-node.adoc
new file mode 100644
index 0000000000..a81c526f1b
--- /dev/null
+++ b/modules/dynamic-plugins/proc-icon-displayed-in-the-node.adoc
@@ -0,0 +1,52 @@
+[id="proc-icon-displayed-in-the-node"]
+
+= Icon displayed in the node
+
+To display a runtime icon in the topology nodes, add the following label to workload resources, such as Deployments:
+
+[source,yaml]
+----
+labels:
+ app.openshift.io/runtime: <runtime_name>
+----
+Alternatively, you can include the following label to display the runtime icon:
+
+[source,yaml]
+----
+labels:
+ app.kubernetes.io/name: <runtime_name>
+----
+
+Supported values of `<runtime_name>` include:
+
+* django
+* dotnet
+* drupal
+* go-gopher
+* golang
+* grails
+* jboss
+* jruby
+* js
+* nginx
+* nodejs
+* openjdk
+* perl
+* phalcon
+* php
+* python
+* quarkus
+* rails
+* redis
+* rh-spring-boot
+* rust
+* java
+* rh-openjdk
+* ruby
+* spring
+* spring-boot
+
+[NOTE]
+====
+Other values result in icons not being rendered for the node.
+====
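+
+For example, the following label renders the Quarkus runtime icon on the node (the value must be one of the supported values listed above):
+
+[source,yaml]
+----
+labels:
+  app.openshift.io/runtime: quarkus
+----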
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-installing-and-configuring-redis-cache.adoc b/modules/dynamic-plugins/proc-installing-and-configuring-redis-cache.adoc
deleted file mode 100644
index 57f6f79442..0000000000
--- a/modules/dynamic-plugins/proc-installing-and-configuring-redis-cache.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-[id="proc-installing-and-configuring-redis-cache_{context}"]
-= Using Redis Cache with dynamic plugins
-You can use the Redis cache store to improve {product-very-short} performance and reliability. Plugins in {product-very-short} receive dedicated cache connections, which are powered by Keyv.
-
-== Installing Redis Cache in {product}
-
-.Prerequisites
-* You have installed Red Hat Developer Hub by using either the Operator or Helm chart.
-* You have an active Redis server. For more information on setting up an external Redis server, see the link:https://www.redis.io/docs/latest/[`Redis official documentation`].
-
-.Procedure
-Add the following code to your `{my-app-config-file}` file:
-[source, yaml]
-----
-backend:
- cache:
- store: redis
- connection: redis://user:pass@cache.example.com:6379
-----
diff --git a/modules/dynamic-plugins/proc-label-selector-query-annotation.adoc b/modules/dynamic-plugins/proc-label-selector-query-annotation.adoc
new file mode 100644
index 0000000000..3c36157df2
--- /dev/null
+++ b/modules/dynamic-plugins/proc-label-selector-query-annotation.adoc
@@ -0,0 +1,31 @@
+[id="proc-label-selector-query-annotation"]
+
+= Label selector query annotation
+
+You can write your own custom label, which {product-very-short} uses to find the Kubernetes resources. The label selector takes precedence over the ID annotations:
+
+[source,yaml]
+----
+annotations:
+ backstage.io/kubernetes-label-selector: 'app=my-app,component=front-end'
+----
+
+If Red Hat OpenShift Dev Spaces is configured and you want multiple entities to support the edit code decorator that redirects to the Red Hat OpenShift Dev Spaces instance, you can add the `backstage.io/kubernetes-label-selector` annotation to the `catalog-info.yaml` file for each entity.
+
+[source,yaml]
+----
+annotations:
+ backstage.io/kubernetes-label-selector: 'component in (<entity_name>,che)'
+----
+
+If you are using the previous label selector, you must add the following labels to your resources so that the Kubernetes plugin gets the Kubernetes resources from the requested entity:
+
+[source,yaml]
+----
+labels:
+ component: che # add this label to your che cluster instance
+labels:
+ component: <entity_name> # add this label to the other resources associated with your entity
+----
+
+You can also write your own custom query for the label selector with unique labels to differentiate your entities. However, you must ensure that you add those labels to the resources associated with your entities, including your CheCluster instance.
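+
+For example, a sketch of a custom selector query, assuming a hypothetical `example.com/part-of` label that you apply both to the resources of an `example-app` entity and to your CheCluster instance:
+
+[source,yaml]
+----
+annotations:
+  backstage.io/kubernetes-label-selector: 'example.com/part-of in (example-app,che)'
+----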
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-linking-to-source-code-editor-or-source.adoc b/modules/dynamic-plugins/proc-linking-to-source-code-editor-or-source.adoc
new file mode 100644
index 0000000000..417e1af3d8
--- /dev/null
+++ b/modules/dynamic-plugins/proc-linking-to-source-code-editor-or-source.adoc
@@ -0,0 +1,33 @@
+[id="proc-linking-to-source-code-editor-or-source"]
+
+= Linking to the source code editor or the source
+
+To navigate to the Git repository of the associated application by using the source code editor, add the following annotation to workload resources, such as Deployments:
+
+[source,yaml]
+----
+annotations:
+ app.openshift.io/vcs-uri: <git_repository_url>
+----
+
+Add the following annotation to navigate to a specific branch:
+
+[source,yaml]
+----
+annotations:
+ app.openshift.io/vcs-ref: <branch_name>
+----
+
+[NOTE]
+====
+If Red Hat OpenShift Dev Spaces is installed and configured and Git URL annotations are also added to the workload YAML file, then clicking on the edit code decorator redirects you to the Red Hat OpenShift Dev Spaces instance.
+====
+
+[NOTE]
+====
+When you deploy your application by using the {ocp-short} Git import flows, you do not need to add these annotations manually because the import flows add them. Otherwise, you must add the annotations manually to the workload YAML file.
+====
+
+//The labels are not similar to `backstage.io/edit-url` annotations as it points to the catalog entity metadata source file and is applied to RHDH catalog entity metadata YAML file, but not Kubernetes resources.
+
+You can also add the `app.openshift.io/edit-url` annotation with the edit URL that you want to access using the decorator.
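+
+For example, the following sketch points the edit code decorator at a hypothetical repository URL:
+
+[source,yaml]
+----
+annotations:
+  app.openshift.io/edit-url: 'https://github.com/example-org/example-app'
+----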
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-load-plugin-oci-image.adoc b/modules/dynamic-plugins/proc-load-plugin-oci-image.adoc
index 0fb9aa17bb..64d42a8312 100644
--- a/modules/dynamic-plugins/proc-load-plugin-oci-image.adoc
+++ b/modules/dynamic-plugins/proc-load-plugin-oci-image.adoc
@@ -7,10 +7,33 @@
For more information about packaging a third-party plugin, see xref:assembly-package-publish-third-party-dynamic-plugin[].
.Procedure
+. To retrieve plugins from an authenticated registry, complete the following steps:
+.. Log in to the container image registry.
++
+[source,terminal]
+----
+podman login
+----
+.. Verify the content of the `auth.json` file created after the login.
++
+[source,terminal]
+----
+cat ${XDG_RUNTIME_DIR:-~/.config}/containers/auth.json
+----
+.. Create a secret from the `auth.json` file by using the following command:
++
+[source,terminal]
+----
+oc create secret generic __<secret_name>__ --from-file=auth.json=${XDG_RUNTIME_DIR:-~/.config}/containers/auth.json <1>
+----
++
+** For an Operator-based deployment, replace __<secret_name>__ with `dynamic-plugins-registry-auth`.
+** For a Helm-based deployment, replace __<secret_name>__ with `_<release_name>_-dynamic-plugins-registry-auth`.
+
. Define the plugin with the `oci://` prefix in the following format in `dynamic-plugins.yaml` file:
+
--
-`oci://:!`
+`oci://__<image_name>__:__<image_tag>__!__<plugin_name>__`
.Example configuration in `dynamic-plugins.yaml` file
[source,yaml]
@@ -20,10 +43,7 @@ plugins:
package: oci://quay.io/example/image:v0.0.1!backstage-plugin-myplugin
----
--
-
-. Configure authentication for private registries by setting the `REGISTRY_AUTH_FILE` environment variable to the path of the registry configuration file. For example, `~/.config/containers/auth.json` or `~/.docker/config.json`.
-
-. To perform an integrity check, use the image digest in place of the tag in the `dynamic-plugins.yaml` file as follows:
+. To perform an integrity check, use the image digest in place of the tag in the `dynamic-plugins.yaml` file as shown in the following example:
+
--
.Example configuration in `dynamic-plugins.yaml` file
@@ -34,5 +54,4 @@ plugins:
package: oci://quay.io/example/image@sha256:28036abec4dffc714394e4ee433f16a59493db8017795049c831be41c02eb5dc!backstage-plugin-myplugin
----
--
-
. To apply the changes, restart the {product-very-short} application.
diff --git a/modules/dynamic-plugins/proc-namespace-annotation.adoc b/modules/dynamic-plugins/proc-namespace-annotation.adoc
new file mode 100644
index 0000000000..22d296db83
--- /dev/null
+++ b/modules/dynamic-plugins/proc-namespace-annotation.adoc
@@ -0,0 +1,16 @@
+[id="proc-namespace-annotation"]
+
+= Namespace annotation
+
+.Procedure
+* To identify the Kubernetes resources using the defined namespace, add the `backstage.io/kubernetes-namespace` annotation:
++
+[source,yaml]
+----
+annotations:
+ backstage.io/kubernetes-namespace: <namespace>
+----
++
+The Red Hat OpenShift Dev Spaces instance is not accessible using the source code editor if the `backstage.io/kubernetes-namespace` annotation is added to the `catalog-info.yaml` file.
++
+To retrieve the instance URL, you require the CheCluster custom resource (CR). As the CheCluster CR is created in the `openshift-devspaces` namespace, the instance URL is not retrieved if the namespace annotation value is not `openshift-devspaces`.
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-node-connector.adoc b/modules/dynamic-plugins/proc-node-connector.adoc
new file mode 100644
index 0000000000..961857e174
--- /dev/null
+++ b/modules/dynamic-plugins/proc-node-connector.adoc
@@ -0,0 +1,12 @@
+[id="proc-node-connector"]
+
+= Node connector
+
+.Procedure
+* To display the workload resources, such as deployments or pods, with a visual connector, add the following annotation:
++
+[source,yaml]
+----
+annotations:
+ app.openshift.io/connects-to: '[{"apiVersion": ,"kind": ,"name": }]'
+----
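+
+For example, the following hypothetical annotation draws a connector from the annotated workload to a `Deployment` named `backend`:
+
+[source,yaml]
+----
+annotations:
+  app.openshift.io/connects-to: '[{"apiVersion": "apps/v1","kind": "Deployment","name": "backend"}]'
+----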
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-overriding-core-backend-services.adoc b/modules/dynamic-plugins/proc-overriding-core-backend-services.adoc
new file mode 100644
index 0000000000..37da22ea01
--- /dev/null
+++ b/modules/dynamic-plugins/proc-overriding-core-backend-services.adoc
@@ -0,0 +1,100 @@
+[id="overriding-core-backend-services_{context}"]
+= Overriding Core Backend Service Configuration
+
+The {product} ({product-very-short}) backend platform consists of a number of core services that are well encapsulated.
+The {product-very-short} backend installs these default core services statically during initialization.
+
+You can customize a core service by installing it as a `BackendFeature` with the dynamic plugin functionality.
+
+.Procedure
+. Configure {product-short} to allow a core service override by setting the corresponding core service ID environment variable to `true` in the {product-short} `{my-app-config-file}` configuration file.
++
+.Environment variables and core service IDs
+[cols="50%,50%",frame="all",options="header"]
+|===
+|Variable
+|Overrides the related service
+
+|`ENABLE_CORE_AUTH_OVERRIDE`
+|`core.auth`
+
+| `ENABLE_CORE_CACHE_OVERRIDE`
+| `core.cache`
+
+| `ENABLE_CORE_ROOTCONFIG_OVERRIDE`
+| `core.rootConfig`
+
+| `ENABLE_CORE_DATABASE_OVERRIDE`
+| `core.database`
+
+| `ENABLE_CORE_DISCOVERY_OVERRIDE`
+| `core.discovery`
+
+| `ENABLE_CORE_HTTPAUTH_OVERRIDE`
+| `core.httpAuth`
+
+| `ENABLE_CORE_HTTPROUTER_OVERRIDE`
+| `core.httpRouter`
+
+| `ENABLE_CORE_LIFECYCLE_OVERRIDE`
+| `core.lifecycle`
+
+| `ENABLE_CORE_LOGGER_OVERRIDE`
+| `core.logger`
+
+| `ENABLE_CORE_PERMISSIONS_OVERRIDE`
+| `core.permissions`
+
+| `ENABLE_CORE_ROOTHEALTH_OVERRIDE`
+| `core.rootHealth`
+
+| `ENABLE_CORE_ROOTHTTPROUTER_OVERRIDE`
+| `core.rootHttpRouter`
+
+| `ENABLE_CORE_ROOTLIFECYCLE_OVERRIDE`
+| `core.rootLifecycle`
+
+| `ENABLE_CORE_SCHEDULER_OVERRIDE`
+| `core.scheduler`
+
+| `ENABLE_CORE_USERINFO_OVERRIDE`
+| `core.userInfo`
+
+| `ENABLE_CORE_URLREADER_OVERRIDE`
+| `core.urlReader`
+
+| `ENABLE_EVENTS_SERVICE_OVERRIDE`
+| `events.service`
+|===
+
+. Install your custom core service as a `BackendFeature` as shown in the following example:
++
+.Example of a `BackendFeature` middleware function to handle incoming `HTTP` requests
+[source,javascript]
+----
+// Create the BackendFeature
+export const customRootHttpServerFactory: BackendFeature =
+ rootHttpRouterServiceFactory({
+ configure: ({ app, routes, middleware, logger }) => {
+ logger.info(
+ 'Using custom root HttpRouterServiceFactory configure function',
+ );
+ app.use(middleware.helmet());
+ app.use(middleware.cors());
+ app.use(middleware.compression());
+ app.use(middleware.logging());
+ // Add the custom middleware function before all
+ // of the route handlers
+ app.use(addTestHeaderMiddleware({ logger }));
+ app.use(routes);
+ app.use(middleware.notFound());
+ app.use(middleware.error());
+ },
+ });
+
+// Export the BackendFeature as the default entrypoint
+export default customRootHttpServerFactory;
+----
++
+In the previous example, because the `BackendFeature` overrides the default implementation of the HTTP router service, you must set the `ENABLE_CORE_ROOTHTTPROUTER_OVERRIDE` environment variable to `true` so that {product-short} does not install the default implementation automatically.
+
diff --git a/modules/dynamic-plugins/proc-topology-configure.adoc b/modules/dynamic-plugins/proc-topology-configure.adoc
deleted file mode 100644
index 96781c5fd6..0000000000
--- a/modules/dynamic-plugins/proc-topology-configure.adoc
+++ /dev/null
@@ -1,333 +0,0 @@
-= Configuration
-
-== Viewing OpenShift routes
-To view OpenShift routes, you must grant read access to the routes resource in the Cluster Role:
-
-[source,yaml]
-----
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRole
- metadata:
- name: backstage-read-only
- rules:
- ...
- - apiGroups:
- - route.openshift.io
- resources:
- - routes
- verbs:
- - get
- - list
-----
-
-You must also add the following in `kubernetes.customResources` property in your `{my-app-config-file}` file:
-
-[source,yaml]
-----
-kubernetes:
- ...
- customResources:
- - group: 'route.openshift.io'
- apiVersion: 'v1'
- plural: 'routes'
-----
-
-== Viewing pod logs
-To view pod logs, you must grant the following permission to the `ClusterRole`:
-
-[source,yaml]
-----
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRole
- metadata:
- name: backstage-read-only
- rules:
- ...
- - apiGroups:
- - ''
- resources:
- - pods
- - pods/log
- verbs:
- - get
- - list
- - watch
-----
-
-== Viewing Tekton PipelineRuns
-To view the Tekton PipelineRuns you must grant read access to the `pipelines`, `pipelinesruns`, and `taskruns` resources in the `ClusterRole`:
-
-[source,yaml]
-----
- ...
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRole
- metadata:
- name: backstage-read-only
- rules:
- ...
- - apiGroups:
- - tekton.dev
- resources:
- - pipelines
- - pipelineruns
- - taskruns
- verbs:
- - get
- - list
-----
-
-To view the Tekton PipelineRuns list in the side panel and the latest PipelineRuns status in the Topology node decorator, you must add the following code to the `kubernetes.customResources` property in your `{my-app-config-file}` file:
-
-[source,yaml]
-----
-kubernetes:
- ...
- customResources:
- - group: 'tekton.dev'
- apiVersion: 'v1'
- plural: 'pipelines'
- - group: 'tekton.dev'
- apiVersion: 'v1'
- plural: 'pipelineruns'
- - group: 'tekton.dev'
- apiVersion: 'v1'
- plural: 'taskruns'
-----
-
-== Viewing virtual machines
-To view virtual machines, the OpenShift Virtualization operator must be installed and configured on a Kubernetes cluster.
-You must also grant read access to the `VirtualMachines` resource in the `ClusterRole`:
-
-[source,yaml]
-----
- ...
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRole
- metadata:
- name: backstage-read-only
- rules:
- ...
- - apiGroups:
- - kubevirt.io
- resources:
- - virtualmachines
- - virtualmachineinstances
- verbs:
- - get
- - list
-----
-
-To view the virtual machine nodes on the topology plugin, you must add the following code to the `kubernetes.customResources` property in the `{my-app-config-file}` file:
-
-[source,yaml]
-----
-kubernetes:
- ...
- customResources:
- - group: 'kubevirt.io'
- apiVersion: 'v1'
- plural: 'virtualmachines'
- - group: 'kubevirt.io'
- apiVersion: 'v1'
- plural: 'virtualmachineinstances'
-----
-
-== Enabling the source code editor
-To enable the source code editor, you must grant read access to the CheClusters resource in the `ClusterRole` as shown in the following example code:
-
-[source,yaml]
-----
- ...
- apiVersion: rbac.authorization.k8s.io/v1
- kind: ClusterRole
- metadata:
- name: backstage-read-only
- rules:
- ...
- - apiGroups:
- - org.eclipse.che
- resources:
- - checlusters
- verbs:
- - get
- - list
-----
-
-To use the source code editor, you must add the following configuration to the `kubernetes.customResources` property in your `{my-app-config-file}` file:
-
-[source,yaml]
-----
- kubernetes:
- ...
- customResources:
- - group: 'org.eclipse.che'
- apiVersion: 'v2'
- plural: 'checlusters'
-----
-
-== Labels and annotations
-=== Linking to the source code editor or the source
-Add the following annotations to workload resources, such as Deployments to navigate to the Git repository of the associated application using the source code editor:
-
-[source,yaml]
-----
-annotations:
- app.openshift.io/vcs-uri:
-----
-
-Add the following annotation to navigate to a specific branch:
-
-[source,yaml]
-----
-annotations:
- app.openshift.io/vcs-ref:
-----
-
-[NOTE]
-====
-If Red Hat OpenShift Dev Spaces is installed and configured and git URL annotations are also added to the workload YAML file, then clicking on the edit code decorator redirects you to the Red Hat OpenShift Dev Spaces instance.
-====
-
-[NOTE]
-====
-When you deploy your application using the OCP Git import flows, then you do not need to add the labels as import flows do that. Otherwise, you need to add the labels manually to the workload YAML file.
-====
-
-//The labels are not similar to `backstage.io/edit-url` annotations as it points to the catalog entity metadata source file and is applied to RHDH catalog entity metadata YAML file, but not Kubernetes resources.
-
-You can also add the `app.openshift.io/edit-url` annotation with the edit URL that you want to access using the decorator.
-
-=== Entity annotation/label
-For RHDH to detect that an entity has Kubernetes components, add the following annotation to the entity's `catalog-info.yaml`:
-
-[source,yaml]
-----
-annotations:
- backstage.io/kubernetes-id:
-----
-
-The following label is added to the resources so that the Kubernetes plugin gets the Kubernetes resources from the requested entity, add the following label to the resources:
-
-[source,yaml]
-----
-labels:
- backstage.io/kubernetes-id: `
-----
-
-[NOTE]
-====
-When using the label selector, the mentioned labels must be present on the resource.
-====
-
-=== Namespace annotation
-To identify the Kubernetes resources using the defined namespace, add the `backstage.io/kubernetes-namespace` annotation:
-
-[source,yaml]
-----
-annotations:
- backstage.io/kubernetes-namespace:
-----
-
-The Red Hat OpenShift Dev Spaces instance is not accessible using the source code editor if the `backstage.io/kubernetes-namespace` annotation is added to the `catalog-info.yaml` file.
-
-To retrieve the instance URL, you require the CheCluster custom resource (CR). As the CheCluster CR is created in the openshift-devspaces namespace, the instance URL is not retrieved if the namespace annotation value is not openshift-devspaces.
-
-=== Label selector query annotation
-You can write your own custom label, which RHDH uses to find the Kubernetes resources. The label selector takes precedence over the ID annotations:
-
-[source,yaml]
-----
-annotations:
- backstage.io/kubernetes-label-selector: 'app=my-app,component=front-end'
-----
-
-If you have multiple entities while Red Hat Dev Spaces is configured and want multiple entities to support the edit code decorator that redirects to the Red Hat Dev Spaces instance, you can add the backstage.io/kubernetes-label-selector annotation to the catalog-info.yaml file for each entity.
-
-[source,yaml]
-----
-annotations:
- backstage.io/kubernetes-label-selector: 'component in (,che)'
-----
-
-If you are using the previous label selector, you must add the following labels to your resources so that the Kubernetes plugin gets the Kubernetes resources from the requested entity:
-
-[source,yaml]
-----
-labels:
- component: che # add this label to your che cluster instance
-labels:
- component: # add this label to the other resources associated with your entity
-----
-
-You can also write your own custom query for the label selector with unique labels to differentiate your entities. However, you need to ensure that you add those labels to the resources associated with your entities including your CheCluster instance.
-
-=== Icon displayed in the node
-To display a runtime icon in the topology nodes, add the following label to workload resources, such as Deployments:
-
-[source,yaml]
-----
-labels:
- app.openshift.io/runtime:
-----
-Alternatively, you can include the following label to display the runtime icon:
-
-[source,yaml]
-----
-labels:
- app.kubernetes.io/name:
-----
-
-Supported values of `` include:
-
-* django
-* dotnet
-* drupal
-* go-gopher
-* golang
-* grails
-* jboss
-* jruby
-* js
-* nginx
-* nodejs
-* openjdk
-* perl
-* phalcon
-* php
-* python
-* quarkus
-* rails
-* redis
-* rh-spring-boot
-* rust
-* java
-* rh-openjdk
-* ruby
-* spring
-* spring-boot
-
-[NOTE]
-====
-Other values result in icons not being rendered for the node.
-====
-
-=== App grouping
-To display workload resources such as deployments or pods in a visual group, add the following label:
-
-[source,yaml]
-----
-labels:
- app.kubernetes.io/part-of:
-----
-
-=== Node connector
-To display the workload resources such as deployments or pods with a visual connector, add the following annotation:
-
-[source,yaml]
-----
-annotations:
- app.openshift.io/connects-to: '[{"apiVersion": ,"kind": ,"name": }]'
-----
-
-For more information about the labels and annotations, see _Guidelines for labels and annotations for OpenShift applications_.
diff --git a/modules/dynamic-plugins/proc-viewing-openshift-routes.adoc b/modules/dynamic-plugins/proc-viewing-openshift-routes.adoc
new file mode 100644
index 0000000000..81322932ea
--- /dev/null
+++ b/modules/dynamic-plugins/proc-viewing-openshift-routes.adoc
@@ -0,0 +1,33 @@
+[id="proc-viewing-openshift-routes_{context}"]
+= Viewing OpenShift routes
+
+.Procedure
+. To view OpenShift routes, grant read access to the `routes` resource in the `ClusterRole`:
++
+[source,yaml]
+----
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: backstage-read-only
+ rules:
+ ...
+ - apiGroups:
+ - route.openshift.io
+ resources:
+ - routes
+ verbs:
+ - get
+ - list
+----
+. Also add the following to the `kubernetes.customResources` property in your `{my-app-config-file}` file:
++
+[source,yaml]
+----
+kubernetes:
+ ...
+ customResources:
+ - group: 'route.openshift.io'
+ apiVersion: 'v1'
+ plural: 'routes'
+----
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-viewing-pod-logs.adoc b/modules/dynamic-plugins/proc-viewing-pod-logs.adoc
new file mode 100644
index 0000000000..083428bb2c
--- /dev/null
+++ b/modules/dynamic-plugins/proc-viewing-pod-logs.adoc
@@ -0,0 +1,24 @@
+[id="proc-viewing-pod-logs_{context}"]
+= Viewing pod logs
+
+.Procedure
+* To view pod logs, you must grant the following permission to the `ClusterRole`:
++
+[source,yaml]
+----
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: backstage-read-only
+ rules:
+ ...
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ - pods/log
+ verbs:
+ - get
+ - list
+ - watch
+----
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-viewing-tekton-pipelineruns.adoc b/modules/dynamic-plugins/proc-viewing-tekton-pipelineruns.adoc
new file mode 100644
index 0000000000..320013681b
--- /dev/null
+++ b/modules/dynamic-plugins/proc-viewing-tekton-pipelineruns.adoc
@@ -0,0 +1,42 @@
+[id="proc-viewing-tekton-pipelineruns_{context}"]
+= Viewing Tekton PipelineRuns
+
+.Procedure
+. To view the Tekton PipelineRuns, grant read access to the `pipelines`, `pipelineruns`, and `taskruns` resources in the `ClusterRole`:
++
+[source,yaml]
+----
+ ...
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: backstage-read-only
+ rules:
+ ...
+ - apiGroups:
+ - tekton.dev
+ resources:
+ - pipelines
+ - pipelineruns
+ - taskruns
+ verbs:
+ - get
+ - list
+----
+. To view the Tekton PipelineRuns list in the side panel and the latest PipelineRuns status in the Topology node decorator, add the following code to the `kubernetes.customResources` property in your `{my-app-config-file}` file:
++
+[source,yaml]
+----
+kubernetes:
+ ...
+ customResources:
+ - group: 'tekton.dev'
+ apiVersion: 'v1'
+ plural: 'pipelines'
+ - group: 'tekton.dev'
+ apiVersion: 'v1'
+ plural: 'pipelineruns'
+ - group: 'tekton.dev'
+ apiVersion: 'v1'
+ plural: 'taskruns'
+----
\ No newline at end of file
diff --git a/modules/dynamic-plugins/proc-viewing-virtual-machines.adoc b/modules/dynamic-plugins/proc-viewing-virtual-machines.adoc
new file mode 100644
index 0000000000..a0a51e02e8
--- /dev/null
+++ b/modules/dynamic-plugins/proc-viewing-virtual-machines.adoc
@@ -0,0 +1,40 @@
+[id="proc-viewing-virtual-machines_{context}"]
+= Viewing virtual machines
+
+.Prerequisites
+* The OpenShift Virtualization operator is installed and configured on a Kubernetes cluster.
+
+.Procedure
+. Grant read access to the `VirtualMachines` resource in the `ClusterRole`:
++
+[source,yaml]
+----
+ ...
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: backstage-read-only
+ rules:
+ ...
+ - apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachines
+ - virtualmachineinstances
+ verbs:
+ - get
+ - list
+----
+. To view the virtual machine nodes on the topology plugin, add the following code to the `kubernetes.customResources` property in the `{my-app-config-file}` file:
++
+[source,yaml]
+----
+kubernetes:
+ ...
+ customResources:
+ - group: 'kubevirt.io'
+ apiVersion: 'v1'
+ plural: 'virtualmachines'
+ - group: 'kubevirt.io'
+ apiVersion: 'v1'
+ plural: 'virtualmachineinstances'
+----
\ No newline at end of file
diff --git a/modules/dynamic-plugins/ref-community-plugins.adoc b/modules/dynamic-plugins/ref-community-plugins.adoc
index 47b3081b90..64a94911b3 100644
--- a/modules/dynamic-plugins/ref-community-plugins.adoc
+++ b/modules/dynamic-plugins/ref-community-plugins.adoc
@@ -10,17 +10,9 @@
Details on how {company-name} provides support for bundled community dynamic plugins are available on the https://access.redhat.com/policy/developerhub-support-policy[Red Hat Developer Support Policy] page.
====
-{product-very-short} includes the following 2 community plugins:
+{product-very-short} includes the following 0 community plugins:
[%header,cols=4*]
|===
|*Name* |*Plugin* |*Version* |*Path and required variables*
-|Argo CD |`https://npmjs.com/package/@roadiehq/backstage-plugin-argo-cd/v/2.8.4[@roadiehq/backstage-plugin-argo-cd]` |2.8.4
-|`./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd`
-
-
-|Global Floating Action Button |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-global-floating-action-button/v/1.0.0[@red-hat-developer-hub/backstage-plugin-global-floating-action-button]` |1.0.0
-|`./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-global-floating-action-button`
-
-
|===
diff --git a/modules/dynamic-plugins/ref-configuring-the-dynamic-plugins-cache.adoc b/modules/dynamic-plugins/ref-configuring-the-dynamic-plugins-cache.adoc
new file mode 100644
index 0000000000..60a772b758
--- /dev/null
+++ b/modules/dynamic-plugins/ref-configuring-the-dynamic-plugins-cache.adoc
@@ -0,0 +1,23 @@
+[id="ref-configuring-the-dynamic-plugins-cache_{context}"]
+= Configuring the dynamic plugins cache
+You can set the following optional dynamic plugin cache parameters in your `dynamic-plugins.yaml` file:
+
+* `forceDownload`: Set the value to `true` to force a reinstall of the plugin, bypassing the cache. The default value is `false`.
+
+* `pullPolicy`: Similar to the `forceDownload` parameter and is consistent with other image container platforms. You can use one of the following values for this key:
+
+** `Always`: This value compares the image digest in the remote registry and downloads the artifact if it has changed, even if the plugin was previously downloaded.
+** `IfNotPresent`: This value downloads the artifact if it is not already present in the dynamic-plugins-root folder, without checking image digests.
++
+[NOTE]
+The `pullPolicy` setting is also applied to the NPM downloading method, although `Always` will download the remote artifact without a digest check. The existing `forceDownload` option remains functional, however, the `pullPolicy` option takes precedence. The `forceDownload` option may be deprecated in a future {product-short} release.
+
+.Example `dynamic-plugins.yaml` file configuration to download the remote artifact without a digest check:
+
+[source,yaml]
+----
+plugins:
+ - disabled: false
+ pullPolicy: Always
+ package: 'oci://quay.io/example-org/example-plugin:v1.0.0!internal-backstage-plugin-example'
+----
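+
+The following sketch reuses the same illustrative package reference as the previous example, but skips the download when the artifact is already present in the `dynamic-plugins-root` folder:
+
+[source,yaml]
+----
+plugins:
+  - disabled: false
+    pullPolicy: IfNotPresent
+    package: 'oci://quay.io/example-org/example-plugin:v1.0.0!internal-backstage-plugin-example'
+----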
\ No newline at end of file
diff --git a/modules/dynamic-plugins/ref-rh-supported-plugins.adoc b/modules/dynamic-plugins/ref-rh-supported-plugins.adoc
index 8ecea82a94..66563c9de4 100644
--- a/modules/dynamic-plugins/ref-rh-supported-plugins.adoc
+++ b/modules/dynamic-plugins/ref-rh-supported-plugins.adoc
@@ -3,12 +3,12 @@
= {company-name} supported plugins
-{company-name} supports the following 21 plugins:
+{company-name} supports the following 24 plugins:
[%header,cols=4*]
|===
|*Name* |*Plugin* |*Version* |*Path and required variables*
-|Analytics Provider Segment |`https://npmjs.com/package/@backstage-community/plugin-analytics-provider-segment/v/1.12.0[@backstage-community/plugin-analytics-provider-segment]` |1.12.0
+|Analytics Provider Segment |`https://npmjs.com/package/@backstage-community/plugin-analytics-provider-segment/v/1.13.0[@backstage-community/plugin-analytics-provider-segment]` |1.13.0
|`./dynamic-plugins/dist/backstage-community-plugin-analytics-provider-segment`
`SEGMENT_WRITE_KEY`
@@ -16,7 +16,11 @@
`SEGMENT_TEST_MODE`
-|Argo CD |`https://npmjs.com/package/@roadiehq/backstage-plugin-argo-cd-backend/v/3.2.3[@roadiehq/backstage-plugin-argo-cd-backend]` |3.2.3
+|Argo CD |`https://npmjs.com/package/@roadiehq/backstage-plugin-argo-cd/v/2.8.4[@roadiehq/backstage-plugin-argo-cd]` |2.8.4
+|`./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd`
+
+
+|Argo CD |`https://npmjs.com/package/@roadiehq/backstage-plugin-argo-cd-backend/v/4.2.0[@roadiehq/backstage-plugin-argo-cd-backend]` |4.2.0
|`./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd-backend-dynamic`
`ARGOCD_USERNAME`
@@ -32,17 +36,21 @@
`ARGOCD_AUTH_TOKEN2`
-|Dynamic Home Page |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-dynamic-home-page/v/1.1.0[@red-hat-developer-hub/backstage-plugin-dynamic-home-page]` |1.1.0
+|Dynamic Home Page |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-dynamic-home-page/v/1.3.0[@red-hat-developer-hub/backstage-plugin-dynamic-home-page]` |1.3.0
|`./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-dynamic-home-page`
-|GitHub |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-github/v/0.7.9[@backstage/plugin-catalog-backend-module-github]` |0.7.9
+|GitHub |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-github/v/0.7.10[@backstage/plugin-catalog-backend-module-github]` |0.7.10
|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic`
`GITHUB_ORG`
-|GitHub Org |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-github-org/v/0.3.6[@backstage/plugin-catalog-backend-module-github-org]` |0.3.6
+|GitHub |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-github/v/0.6.0[@backstage/plugin-scaffolder-backend-module-github]` |0.6.0
+|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-github-dynamic`
+
+
+|GitHub Org |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-github-org/v/0.3.7[@backstage/plugin-catalog-backend-module-github-org]` |0.3.7
|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-org-dynamic`
`GITHUB_URL`
@@ -50,11 +58,15 @@
`GITHUB_ORG`
-|Global Header |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-global-header/v/1.0.0[@red-hat-developer-hub/backstage-plugin-global-header]` |1.0.0
+|Global Floating Action Button |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-global-floating-action-button/v/1.1.4[@red-hat-developer-hub/backstage-plugin-global-floating-action-button]` |1.1.4
+|`./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-global-floating-action-button`
+
+
+|Global Header |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-global-header/v/1.5.0[@red-hat-developer-hub/backstage-plugin-global-header]` |1.5.0
|`./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-global-header`
-|Keycloak |`https://npmjs.com/package/@backstage-community/plugin-catalog-backend-module-keycloak/v/3.7.0[@backstage-community/plugin-catalog-backend-module-keycloak]` |3.7.0
+|Keycloak |`https://npmjs.com/package/@backstage-community/plugin-catalog-backend-module-keycloak/v/3.10.0[@backstage-community/plugin-catalog-backend-module-keycloak]` |3.10.0
|`./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-keycloak-dynamic`
`KEYCLOAK_BASE_URL`
@@ -68,7 +80,7 @@
`KEYCLOAK_CLIENT_SECRET`
-|Kubernetes |`https://npmjs.com/package/@backstage/plugin-kubernetes-backend/v/0.19.2[@backstage/plugin-kubernetes-backend]` |0.19.2
+|Kubernetes |`https://npmjs.com/package/@backstage/plugin-kubernetes-backend/v/0.19.3[@backstage/plugin-kubernetes-backend]` |0.19.3
|`./dynamic-plugins/dist/backstage-plugin-kubernetes-backend-dynamic`
`K8S_CLUSTER_NAME`
@@ -78,15 +90,15 @@
`K8S_CLUSTER_TOKEN`
-|Kubernetes |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-kubernetes/v/2.5.0[@backstage-community/plugin-scaffolder-backend-module-kubernetes]` |2.5.0
+|Kubernetes |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-kubernetes/v/2.6.0[@backstage-community/plugin-scaffolder-backend-module-kubernetes]` |2.6.0
|`./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-kubernetes-dynamic`
-|OCM |`https://npmjs.com/package/@backstage-community/plugin-ocm/v/5.3.0[@backstage-community/plugin-ocm]` |5.3.0
+|OCM |`https://npmjs.com/package/@backstage-community/plugin-ocm/v/5.4.0[@backstage-community/plugin-ocm]` |5.4.0
|`./dynamic-plugins/dist/backstage-community-plugin-ocm`
-|OCM |`https://npmjs.com/package/@backstage-community/plugin-ocm-backend/v/5.4.0[@backstage-community/plugin-ocm-backend]` |5.4.0
+|OCM |`https://npmjs.com/package/@backstage-community/plugin-ocm-backend/v/5.5.0[@backstage-community/plugin-ocm-backend]` |5.5.0
|`./dynamic-plugins/dist/backstage-community-plugin-ocm-backend-dynamic`
`OCM_HUB_NAME`
@@ -96,43 +108,43 @@
`OCM_SA_TOKEN`
-|Quay |`https://npmjs.com/package/@backstage-community/plugin-quay/v/1.18.1[@backstage-community/plugin-quay]` |1.18.1
+|Quay |`https://npmjs.com/package/@backstage-community/plugin-quay/v/1.19.0[@backstage-community/plugin-quay]` |1.19.0
|`./dynamic-plugins/dist/backstage-community-plugin-quay`
-|Quay |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-quay/v/2.4.0[@backstage-community/plugin-scaffolder-backend-module-quay]` |2.4.0
+|Quay |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-quay/v/2.6.0[@backstage-community/plugin-scaffolder-backend-module-quay]` |2.6.0
|`./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-quay-dynamic`
-|RBAC |`https://npmjs.com/package/@backstage-community/plugin-rbac/v/1.38.2[@backstage-community/plugin-rbac]` |1.38.2
+|RBAC |`https://npmjs.com/package/@backstage-community/plugin-rbac/v/1.39.2[@backstage-community/plugin-rbac]` |1.39.2
|`./dynamic-plugins/dist/backstage-community-plugin-rbac`
-|Regex |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-regex/v/2.4.0[@backstage-community/plugin-scaffolder-backend-module-regex]` |2.4.0
+|Regex |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-regex/v/2.5.0[@backstage-community/plugin-scaffolder-backend-module-regex]` |2.5.0
|`./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-regex-dynamic`
-|Signals |`https://npmjs.com/package/@backstage/plugin-signals-backend/v/0.3.0[@backstage/plugin-signals-backend]` |0.3.0
+|Signals |`https://npmjs.com/package/@backstage/plugin-signals-backend/v/0.3.1[@backstage/plugin-signals-backend]` |0.3.1
|`./dynamic-plugins/dist/backstage-plugin-signals-backend-dynamic`
-|TechDocs |`https://npmjs.com/package/@backstage/plugin-techdocs/v/1.12.2[@backstage/plugin-techdocs]` |1.12.2
+|TechDocs |`https://npmjs.com/package/@backstage/plugin-techdocs/v/1.12.3[@backstage/plugin-techdocs]` |1.12.3
|`./dynamic-plugins/dist/backstage-plugin-techdocs`
-|TechDocs |`https://npmjs.com/package/@backstage/plugin-techdocs-backend/v/1.11.5[@backstage/plugin-techdocs-backend]` |1.11.5
+|TechDocs |`https://npmjs.com/package/@backstage/plugin-techdocs-backend/v/1.11.6[@backstage/plugin-techdocs-backend]` |1.11.6
|`./dynamic-plugins/dist/backstage-plugin-techdocs-backend-dynamic`
-|TechDocs Module Addons Contrib |`https://npmjs.com/package/@backstage/plugin-techdocs-module-addons-contrib/v/1.1.20[@backstage/plugin-techdocs-module-addons-contrib]` |1.1.20
+|TechDocs Module Addons Contrib |`https://npmjs.com/package/@backstage/plugin-techdocs-module-addons-contrib/v/1.1.21[@backstage/plugin-techdocs-module-addons-contrib]` |1.1.21
|`./dynamic-plugins/dist/backstage-plugin-techdocs-module-addons-contrib`
-|Tekton |`https://npmjs.com/package/@backstage-community/plugin-tekton/v/3.19.0[@backstage-community/plugin-tekton]` |3.19.0
+|Tekton |`https://npmjs.com/package/@backstage-community/plugin-tekton/v/3.22.0[@backstage-community/plugin-tekton]` |3.22.0
|`./dynamic-plugins/dist/backstage-community-plugin-tekton`
-|Topology |`https://npmjs.com/package/@backstage-community/plugin-topology/v/1.32.0[@backstage-community/plugin-topology]` |1.32.0
+|Topology |`https://npmjs.com/package/@backstage-community/plugin-topology/v/2.0.0[@backstage-community/plugin-topology]` |2.0.0
|`./dynamic-plugins/dist/backstage-community-plugin-topology`
diff --git a/modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc b/modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc
index 170383fdd5..1b87654862 100644
--- a/modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc
+++ b/modules/dynamic-plugins/ref-rh-tech-preview-plugins.adoc
@@ -3,12 +3,12 @@
= {company-name} Technology Preview plugins
-{company-name} provides Technology Preview support for the following 56 plugins:
+{company-name} provides Technology Preview support for the following 55 plugins:
[%header,cols=4*]
|===
|*Name* |*Plugin* |*Version* |*Path and required variables*
-|3scale |`https://npmjs.com/package/@backstage-community/plugin-3scale-backend/v/3.2.0[@backstage-community/plugin-3scale-backend]` |3.2.0
+|3scale |`https://npmjs.com/package/@backstage-community/plugin-3scale-backend/v/3.3.0[@backstage-community/plugin-3scale-backend]` |3.3.0
|`./dynamic-plugins/dist/backstage-community-plugin-3scale-backend-dynamic`
`THREESCALE_BASE_URL`
@@ -16,11 +16,11 @@
`THREESCALE_ACCESS_TOKEN`
-|ACR |`https://npmjs.com/package/@backstage-community/plugin-acr/v/1.11.0[@backstage-community/plugin-acr]` |1.11.0
+|ACR |`https://npmjs.com/package/@backstage-community/plugin-acr/v/1.12.1[@backstage-community/plugin-acr]` |1.12.1
|`./dynamic-plugins/dist/backstage-community-plugin-acr`
-|Argo CD (Red Hat) |`https://npmjs.com/package/@backstage-community/plugin-redhat-argocd/v/1.14.0[@backstage-community/plugin-redhat-argocd]` |1.14.0
+|Argo CD (Red Hat) |`https://npmjs.com/package/@backstage-community/plugin-redhat-argocd/v/1.18.0[@backstage-community/plugin-redhat-argocd]` |1.18.0
|`./dynamic-plugins/dist/backstage-community-plugin-redhat-argocd`
@@ -40,15 +40,15 @@
`ARGOCD_AUTH_TOKEN2`
-|Azure |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-azure/v/0.2.5[@backstage/plugin-scaffolder-backend-module-azure]` |0.2.5
+|Azure |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-azure/v/0.2.6[@backstage/plugin-scaffolder-backend-module-azure]` |0.2.6
|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-azure-dynamic`
-|Azure Devops |`https://npmjs.com/package/@backstage-community/plugin-azure-devops/v/0.9.0[@backstage-community/plugin-azure-devops]` |0.9.0
+|Azure Devops |`https://npmjs.com/package/@backstage-community/plugin-azure-devops/v/0.10.0[@backstage-community/plugin-azure-devops]` |0.10.0
|`./dynamic-plugins/dist/backstage-community-plugin-azure-devops`
-|Azure Devops |`https://npmjs.com/package/@backstage-community/plugin-azure-devops-backend/v/0.11.0[@backstage-community/plugin-azure-devops-backend]` |0.11.0
+|Azure Devops |`https://npmjs.com/package/@backstage-community/plugin-azure-devops-backend/v/0.12.0[@backstage-community/plugin-azure-devops-backend]` |0.12.0
|`./dynamic-plugins/dist/backstage-community-plugin-azure-devops-backend-dynamic`
`AZURE_TOKEN`
@@ -60,31 +60,31 @@
|`./dynamic-plugins/dist/parfuemerie-douglas-scaffolder-backend-module-azure-repositories-dynamic`
-|Bitbucket Cloud |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-bitbucket-cloud/v/0.4.4[@backstage/plugin-catalog-backend-module-bitbucket-cloud]` |0.4.4
+|Bitbucket Cloud |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-bitbucket-cloud/v/0.4.5[@backstage/plugin-catalog-backend-module-bitbucket-cloud]` |0.4.5
|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-cloud-dynamic`
`BITBUCKET_WORKSPACE`
-|Bitbucket Cloud |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-bitbucket-cloud/v/0.2.5[@backstage/plugin-scaffolder-backend-module-bitbucket-cloud]` |0.2.5
+|Bitbucket Cloud |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-bitbucket-cloud/v/0.2.6[@backstage/plugin-scaffolder-backend-module-bitbucket-cloud]` |0.2.6
|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-cloud-dynamic`
-|Bitbucket Server |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-bitbucket-server/v/0.3.1[@backstage/plugin-catalog-backend-module-bitbucket-server]` |0.3.1
+|Bitbucket Server |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-bitbucket-server/v/0.3.2[@backstage/plugin-catalog-backend-module-bitbucket-server]` |0.3.2
|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-server-dynamic`
`BITBUCKET_HOST`
-|Bitbucket Server |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-bitbucket-server/v/0.2.5[@backstage/plugin-scaffolder-backend-module-bitbucket-server]` |0.2.5
+|Bitbucket Server |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-bitbucket-server/v/0.2.6[@backstage/plugin-scaffolder-backend-module-bitbucket-server]` |0.2.6
|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-server-dynamic`
-|Bulk Import |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-bulk-import/v/1.11.0[@red-hat-developer-hub/backstage-plugin-bulk-import]` |1.11.0
+|Bulk Import |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-bulk-import/v/1.12.1[@red-hat-developer-hub/backstage-plugin-bulk-import]` |1.12.1
|`./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-bulk-import`
-|Bulk Import |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-bulk-import-backend/v/5.3.0[@red-hat-developer-hub/backstage-plugin-bulk-import-backend]` |5.3.0
+|Bulk Import |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-bulk-import-backend/v/6.0.0[@red-hat-developer-hub/backstage-plugin-bulk-import-backend]` |6.0.0
|`./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-bulk-import-backend-dynamic`
@@ -92,19 +92,15 @@
|`./dynamic-plugins/dist/roadiehq-backstage-plugin-datadog`
-|Dynatrace |`https://npmjs.com/package/@backstage-community/plugin-dynatrace/v/10.2.0[@backstage-community/plugin-dynatrace]` |10.2.0
+|Dynatrace |`https://npmjs.com/package/@backstage-community/plugin-dynatrace/v/10.3.0[@backstage-community/plugin-dynatrace]` |10.3.0
|`./dynamic-plugins/dist/backstage-community-plugin-dynatrace`
-|Gerrit |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-gerrit/v/0.2.5[@backstage/plugin-scaffolder-backend-module-gerrit]` |0.2.5
+|Gerrit |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-gerrit/v/0.2.6[@backstage/plugin-scaffolder-backend-module-gerrit]` |0.2.6
|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gerrit-dynamic`
-|GitHub |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-github/v/0.5.5[@backstage/plugin-scaffolder-backend-module-github]` |0.5.5
-|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-github-dynamic`
-
-
-|GitHub Actions |`https://npmjs.com/package/@backstage-community/plugin-github-actions/v/0.7.1[@backstage-community/plugin-github-actions]` |0.7.1
+|GitHub Actions |`https://npmjs.com/package/@backstage-community/plugin-github-actions/v/0.8.0[@backstage-community/plugin-github-actions]` |0.8.0
|`./dynamic-plugins/dist/backstage-community-plugin-github-actions`
@@ -112,7 +108,7 @@
|`./dynamic-plugins/dist/roadiehq-backstage-plugin-github-insights`
-|GitHub Issues |`https://npmjs.com/package/@backstage-community/plugin-github-issues/v/0.6.0[@backstage-community/plugin-github-issues]` |0.6.0
+|GitHub Issues |`https://npmjs.com/package/@backstage-community/plugin-github-issues/v/0.7.0[@backstage-community/plugin-github-issues]` |0.7.0
|`./dynamic-plugins/dist/backstage-community-plugin-github-issues`
@@ -124,7 +120,7 @@
|`./dynamic-plugins/dist/immobiliarelabs-backstage-plugin-gitlab`
-|GitLab |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-gitlab/v/0.6.2[@backstage/plugin-catalog-backend-module-gitlab]` |0.6.2
+|GitLab |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-gitlab/v/0.6.3[@backstage/plugin-catalog-backend-module-gitlab]` |0.6.3
|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-dynamic`
@@ -136,11 +132,11 @@
`GITLAB_TOKEN`
-|GitLab |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-gitlab/v/0.7.1[@backstage/plugin-scaffolder-backend-module-gitlab]` |0.7.1
+|GitLab |`https://npmjs.com/package/@backstage/plugin-scaffolder-backend-module-gitlab/v/0.8.0[@backstage/plugin-scaffolder-backend-module-gitlab]` |0.8.0
|`./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gitlab-dynamic`
-|GitLab Org |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-gitlab-org/v/0.2.5[@backstage/plugin-catalog-backend-module-gitlab-org]` |0.2.5
+|GitLab Org |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-gitlab-org/v/0.2.6[@backstage/plugin-catalog-backend-module-gitlab-org]` |0.2.6
|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-org-dynamic`
@@ -148,11 +144,11 @@
|`./dynamic-plugins/dist/roadiehq-scaffolder-backend-module-http-request-dynamic`
-|Jenkins |`https://npmjs.com/package/@backstage-community/plugin-jenkins/v/0.16.0[@backstage-community/plugin-jenkins]` |0.16.0
+|Jenkins |`https://npmjs.com/package/@backstage-community/plugin-jenkins/v/0.17.0[@backstage-community/plugin-jenkins]` |0.17.0
|`./dynamic-plugins/dist/backstage-community-plugin-jenkins`
-|Jenkins |`https://npmjs.com/package/@backstage-community/plugin-jenkins-backend/v/0.11.0[@backstage-community/plugin-jenkins-backend]` |0.11.0
+|Jenkins |`https://npmjs.com/package/@backstage-community/plugin-jenkins-backend/v/0.12.1[@backstage-community/plugin-jenkins-backend]` |0.12.1
|`./dynamic-plugins/dist/backstage-community-plugin-jenkins-backend-dynamic`
`JENKINS_URL`
@@ -162,7 +158,7 @@
`JENKINS_TOKEN`
-|JFrog Artifactory |`https://npmjs.com/package/@backstage-community/plugin-jfrog-artifactory/v/1.13.0[@backstage-community/plugin-jfrog-artifactory]` |1.13.0
+|JFrog Artifactory |`https://npmjs.com/package/@backstage-community/plugin-jfrog-artifactory/v/1.13.3[@backstage-community/plugin-jfrog-artifactory]` |1.13.3
|`./dynamic-plugins/dist/backstage-community-plugin-jfrog-artifactory`
@@ -170,47 +166,47 @@
|`./dynamic-plugins/dist/roadiehq-backstage-plugin-jira`
-|Kubernetes |`https://npmjs.com/package/@backstage/plugin-kubernetes/v/0.12.3[@backstage/plugin-kubernetes]` |0.12.3
+|Kubernetes |`https://npmjs.com/package/@backstage/plugin-kubernetes/v/0.12.4[@backstage/plugin-kubernetes]` |0.12.4
|`./dynamic-plugins/dist/backstage-plugin-kubernetes`
-|Ldap |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-ldap/v/0.11.1[@backstage/plugin-catalog-backend-module-ldap]` |0.11.1
+|Ldap |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-ldap/v/0.11.2[@backstage/plugin-catalog-backend-module-ldap]` |0.11.2
|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-ldap-dynamic`
-|Lighthouse |`https://npmjs.com/package/@backstage-community/plugin-lighthouse/v/0.6.0[@backstage-community/plugin-lighthouse]` |0.6.0
+|Lighthouse |`https://npmjs.com/package/@backstage-community/plugin-lighthouse/v/0.7.0[@backstage-community/plugin-lighthouse]` |0.7.0
|`./dynamic-plugins/dist/backstage-community-plugin-lighthouse`
-|Marketplace |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-marketplace/v/0.2.0[@red-hat-developer-hub/backstage-plugin-marketplace]` |0.2.1
+|Marketplace |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-marketplace/v/0.5.7[@red-hat-developer-hub/backstage-plugin-marketplace]` |0.5.7
|`./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-marketplace`
-|Marketplace |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-catalog-backend-module-marketplace/v/0.2.0[@red-hat-developer-hub/backstage-plugin-catalog-backend-module-marketplace]` |0.2.2
+|Marketplace |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-catalog-backend-module-marketplace/v/0.3.0[@red-hat-developer-hub/backstage-plugin-catalog-backend-module-marketplace]` |0.3.0
|`./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-catalog-backend-module-marketplace-dynamic`
-|Marketplace |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-marketplace-backend/v/0.2.0[@red-hat-developer-hub/backstage-plugin-marketplace-backend]` |0.2.0
+|Marketplace |`https://npmjs.com/package/@red-hat-developer-hub/backstage-plugin-marketplace-backend/v/0.3.1[@red-hat-developer-hub/backstage-plugin-marketplace-backend]` |0.3.1
|`./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-marketplace-backend-dynamic`
-|MS Graph |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-msgraph/v/0.6.6[@backstage/plugin-catalog-backend-module-msgraph]` |0.6.6
+|MS Graph |`https://npmjs.com/package/@backstage/plugin-catalog-backend-module-msgraph/v/0.6.7[@backstage/plugin-catalog-backend-module-msgraph]` |0.6.7
|`./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-msgraph-dynamic`
-|Nexus Repository Manager |`https://npmjs.com/package/@backstage-community/plugin-nexus-repository-manager/v/1.12.0[@backstage-community/plugin-nexus-repository-manager]` |1.12.0
+|Nexus Repository Manager |`https://npmjs.com/package/@backstage-community/plugin-nexus-repository-manager/v/1.13.0[@backstage-community/plugin-nexus-repository-manager]` |1.13.0
|`./dynamic-plugins/dist/backstage-community-plugin-nexus-repository-manager`
-|Notifications |`https://npmjs.com/package/@backstage/plugin-notifications/v/0.5.1[@backstage/plugin-notifications]` |0.5.1
+|Notifications |`https://npmjs.com/package/@backstage/plugin-notifications/v/0.5.2[@backstage/plugin-notifications]` |0.5.2
|`./dynamic-plugins/dist/backstage-plugin-notifications`
-|Notifications |`https://npmjs.com/package/@backstage/plugin-notifications-backend/v/0.5.1[@backstage/plugin-notifications-backend]` |0.5.1
+|Notifications |`https://npmjs.com/package/@backstage/plugin-notifications-backend/v/0.5.3[@backstage/plugin-notifications-backend]` |0.5.3
|`./dynamic-plugins/dist/backstage-plugin-notifications-backend-dynamic`
-|Notifications Module Email |`https://npmjs.com/package/@backstage/plugin-notifications-backend-module-email/v/0.3.5[@backstage/plugin-notifications-backend-module-email]` |0.3.5
+|Notifications Module Email |`https://npmjs.com/package/@backstage/plugin-notifications-backend-module-email/v/0.3.6[@backstage/plugin-notifications-backend-module-email]` |0.3.6
|`./dynamic-plugins/dist/backstage-plugin-notifications-backend-module-email-dynamic`
`EMAIL_HOSTNAME`
@@ -238,11 +234,11 @@
`PAGERDUTY_SUBDOMAIN`
-|Pingidentity |`https://npmjs.com/package/@backstage-community/plugin-catalog-backend-module-pingidentity/v/0.2.0[@backstage-community/plugin-catalog-backend-module-pingidentity]` |0.2.0
+|Pingidentity |`https://npmjs.com/package/@backstage-community/plugin-catalog-backend-module-pingidentity/v/0.3.0[@backstage-community/plugin-catalog-backend-module-pingidentity]` |0.3.0
|`./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-pingidentity-dynamic`
-|Scaffolder Relation Processor |`https://npmjs.com/package/@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor/v/2.2.0[@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor]` |2.2.0
+|Scaffolder Relation Processor |`https://npmjs.com/package/@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor/v/2.3.0[@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor]` |2.3.0
|`./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-scaffolder-relation-processor-dynamic`
@@ -250,7 +246,7 @@
|`./dynamic-plugins/dist/roadiehq-backstage-plugin-security-insights`
-|ServiceNow |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-servicenow/v/2.4.0[@backstage-community/plugin-scaffolder-backend-module-servicenow]` |2.4.0
+|ServiceNow |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-servicenow/v/2.5.0[@backstage-community/plugin-scaffolder-backend-module-servicenow]` |2.5.0
|`./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-servicenow-dynamic`
`SERVICENOW_BASE_URL`
@@ -260,7 +256,7 @@
`SERVICENOW_PASSWORD`
-|Signals |`https://npmjs.com/package/@backstage/plugin-signals/v/0.0.15[@backstage/plugin-signals]` |0.0.15
+|Signals |`https://npmjs.com/package/@backstage/plugin-signals/v/0.0.16[@backstage/plugin-signals]` |0.0.16
|`./dynamic-plugins/dist/backstage-plugin-signals`
@@ -276,15 +272,15 @@
`SONARQUBE_TOKEN`
-|SonarQube |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-sonarqube/v/2.4.0[@backstage-community/plugin-scaffolder-backend-module-sonarqube]` |2.4.0
+|SonarQube |`https://npmjs.com/package/@backstage-community/plugin-scaffolder-backend-module-sonarqube/v/2.5.0[@backstage-community/plugin-scaffolder-backend-module-sonarqube]` |2.5.0
|`./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-sonarqube-dynamic`
-|Tech Radar |`https://npmjs.com/package/@backstage-community/plugin-tech-radar/v/1.2.0[@backstage-community/plugin-tech-radar]` |1.2.0
+|Tech Radar |`https://npmjs.com/package/@backstage-community/plugin-tech-radar/v/1.3.0[@backstage-community/plugin-tech-radar]` |1.3.0
|`./dynamic-plugins/dist/backstage-community-plugin-tech-radar`
-|Tech Radar |`https://npmjs.com/package/@backstage-community/plugin-tech-radar-backend/v/1.2.0[@backstage-community/plugin-tech-radar-backend]` |1.2.0
+|Tech Radar |`https://npmjs.com/package/@backstage-community/plugin-tech-radar-backend/v/1.3.0[@backstage-community/plugin-tech-radar-backend]` |1.3.0
|`./dynamic-plugins/dist/backstage-community-plugin-tech-radar-backend-dynamic`
`TECH_RADAR_DATA_URL`
diff --git a/modules/dynamic-plugins/rhdh-supported-plugins.csv b/modules/dynamic-plugins/rhdh-supported-plugins.csv
index 8a7d3b3f19..7ba9bd55e5 100644
--- a/modules/dynamic-plugins/rhdh-supported-plugins.csv
+++ b/modules/dynamic-plugins/rhdh-supported-plugins.csv
@@ -1,77 +1,77 @@
"Name","Plugin","Role","Version","Support Level","Path","Required Variables","Default"
-"Analytics Provider Segment ","@backstage-community/plugin-analytics-provider-segment","Frontend","1.12.0","Production","./dynamic-plugins/dist/backstage-community-plugin-analytics-provider-segment","`SEGMENT_WRITE_KEY`;`SEGMENT_TEST_MODE`;","Enabled"
-"Argo CD ","@roadiehq/backstage-plugin-argo-cd-backend","Backend","3.2.3","Production","./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd-backend-dynamic","`ARGOCD_USERNAME`;`ARGOCD_PASSWORD`;`ARGOCD_INSTANCE1_URL`;`ARGOCD_AUTH_TOKEN`;`ARGOCD_INSTANCE2_URL`;`ARGOCD_AUTH_TOKEN2`;","Disabled"
-"Dynamic Home Page ","@red-hat-developer-hub/backstage-plugin-dynamic-home-page","Frontend","1.1.0","Production","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-dynamic-home-page",";","Enabled"
-"GitHub ","@backstage/plugin-catalog-backend-module-github","Backend","0.7.9","Production","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic","`GITHUB_ORG`;","Disabled"
-"GitHub Org ","@backstage/plugin-catalog-backend-module-github-org","Backend","0.3.6","Production","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-org-dynamic","`GITHUB_URL`;`GITHUB_ORG`;","Disabled"
-"Global Header ","@red-hat-developer-hub/backstage-plugin-global-header","Frontend","1.0.0","Production","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-global-header",";","Enabled"
-"Keycloak ","@backstage-community/plugin-catalog-backend-module-keycloak","Backend","3.7.0","Production","./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-keycloak-dynamic","`KEYCLOAK_BASE_URL`;`KEYCLOAK_LOGIN_REALM`;`KEYCLOAK_REALM`;`KEYCLOAK_CLIENT_ID`;`KEYCLOAK_CLIENT_SECRET`;","Disabled"
-"Kubernetes ","@backstage/plugin-kubernetes-backend","Backend","0.19.2","Production","./dynamic-plugins/dist/backstage-plugin-kubernetes-backend-dynamic","`K8S_CLUSTER_NAME`;`K8S_CLUSTER_URL`;`K8S_CLUSTER_TOKEN`;","Disabled"
-"Kubernetes ","@backstage-community/plugin-scaffolder-backend-module-kubernetes","Backend","2.5.0","Production","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-kubernetes-dynamic",";","Disabled"
-"OCM ","@backstage-community/plugin-ocm","Frontend","5.3.0","Production","./dynamic-plugins/dist/backstage-community-plugin-ocm",";","Disabled"
-"OCM ","@backstage-community/plugin-ocm-backend","Backend","5.4.0","Production","./dynamic-plugins/dist/backstage-community-plugin-ocm-backend-dynamic","`OCM_HUB_NAME`;`OCM_HUB_URL`;`OCM_SA_TOKEN`;","Disabled"
-"Quay ","@backstage-community/plugin-quay","Frontend","1.18.1","Production","./dynamic-plugins/dist/backstage-community-plugin-quay",";","Disabled"
-"Quay ","@backstage-community/plugin-scaffolder-backend-module-quay","Backend","2.4.0","Production","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-quay-dynamic",";","Enabled"
-"RBAC ","@backstage-community/plugin-rbac","Frontend","1.38.2","Production","./dynamic-plugins/dist/backstage-community-plugin-rbac",";","Disabled"
-"Regex ","@backstage-community/plugin-scaffolder-backend-module-regex","Backend","2.4.0","Production","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-regex-dynamic",";","Enabled"
-"Signals ","@backstage/plugin-signals-backend","Backend","0.3.0","Production","./dynamic-plugins/dist/backstage-plugin-signals-backend-dynamic",";","Disabled"
-"Tekton ","@backstage-community/plugin-tekton","Frontend","3.19.0","Production","./dynamic-plugins/dist/backstage-community-plugin-tekton",";","Disabled"
-"Topology ","@backstage-community/plugin-topology","Frontend","1.32.0","Production","./dynamic-plugins/dist/backstage-community-plugin-topology",";","Disabled"
-"3scale ","@backstage-community/plugin-3scale-backend","Backend","3.2.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-3scale-backend-dynamic","`THREESCALE_BASE_URL`;`THREESCALE_ACCESS_TOKEN`;","Disabled"
-"ACR ","@backstage-community/plugin-acr","Frontend","1.11.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-acr",";","Disabled"
-"Argo CD (Red Hat) ","@backstage-community/plugin-redhat-argocd","Frontend","1.14.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-redhat-argocd",";","Disabled"
+"Analytics Provider Segment ","@backstage-community/plugin-analytics-provider-segment","Frontend","1.13.0","Production","./dynamic-plugins/dist/backstage-community-plugin-analytics-provider-segment","`SEGMENT_WRITE_KEY`;`SEGMENT_TEST_MODE`;","Enabled"
+"Argo CD ","@roadiehq/backstage-plugin-argo-cd","Frontend","2.8.4","Production","./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd",";","Disabled"
+"Argo CD ","@roadiehq/backstage-plugin-argo-cd-backend","Backend","4.2.0","Production","./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd-backend-dynamic","`ARGOCD_USERNAME`;`ARGOCD_PASSWORD`;`ARGOCD_INSTANCE1_URL`;`ARGOCD_AUTH_TOKEN`;`ARGOCD_INSTANCE2_URL`;`ARGOCD_AUTH_TOKEN2`;","Disabled"
+"Dynamic Home Page ","@red-hat-developer-hub/backstage-plugin-dynamic-home-page","Frontend","1.3.0","Production","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-dynamic-home-page",";","Enabled"
+"GitHub ","@backstage/plugin-catalog-backend-module-github","Backend","0.7.10","Production","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-dynamic","`GITHUB_ORG`;","Disabled"
+"GitHub ","@backstage/plugin-scaffolder-backend-module-github","Backend","0.6.0","Production","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-github-dynamic",";","Disabled"
+"GitHub Org ","@backstage/plugin-catalog-backend-module-github-org","Backend","0.3.7","Production","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-github-org-dynamic","`GITHUB_URL`;`GITHUB_ORG`;","Disabled"
+"Global Floating Action Button ","@red-hat-developer-hub/backstage-plugin-global-floating-action-button","Frontend","1.1.4","Production","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-global-floating-action-button",";","Enabled"
+"Global Header ","@red-hat-developer-hub/backstage-plugin-global-header","Frontend","1.5.0","Production","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-global-header",";","Enabled"
+"Keycloak ","@backstage-community/plugin-catalog-backend-module-keycloak","Backend","3.10.0","Production","./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-keycloak-dynamic","`KEYCLOAK_BASE_URL`;`KEYCLOAK_LOGIN_REALM`;`KEYCLOAK_REALM`;`KEYCLOAK_CLIENT_ID`;`KEYCLOAK_CLIENT_SECRET`;","Disabled"
+"Kubernetes ","@backstage/plugin-kubernetes-backend","Backend","0.19.3","Production","./dynamic-plugins/dist/backstage-plugin-kubernetes-backend-dynamic","`K8S_CLUSTER_NAME`;`K8S_CLUSTER_URL`;`K8S_CLUSTER_TOKEN`;","Disabled"
+"Kubernetes ","@backstage-community/plugin-scaffolder-backend-module-kubernetes","Backend","2.6.0","Production","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-kubernetes-dynamic",";","Disabled"
+"OCM ","@backstage-community/plugin-ocm","Frontend","5.4.0","Production","./dynamic-plugins/dist/backstage-community-plugin-ocm",";","Disabled"
+"OCM ","@backstage-community/plugin-ocm-backend","Backend","5.5.0","Production","./dynamic-plugins/dist/backstage-community-plugin-ocm-backend-dynamic","`OCM_HUB_NAME`;`OCM_HUB_URL`;`OCM_SA_TOKEN`;","Disabled"
+"Quay ","@backstage-community/plugin-quay","Frontend","1.19.0","Production","./dynamic-plugins/dist/backstage-community-plugin-quay",";","Disabled"
+"Quay ","@backstage-community/plugin-scaffolder-backend-module-quay","Backend","2.6.0","Production","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-quay-dynamic",";","Enabled"
+"RBAC ","@backstage-community/plugin-rbac","Frontend","1.39.2","Production","./dynamic-plugins/dist/backstage-community-plugin-rbac",";","Disabled"
+"Regex ","@backstage-community/plugin-scaffolder-backend-module-regex","Backend","2.5.0","Production","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-regex-dynamic",";","Enabled"
+"Signals ","@backstage/plugin-signals-backend","Backend","0.3.1","Production","./dynamic-plugins/dist/backstage-plugin-signals-backend-dynamic",";","Disabled"
+"Tekton ","@backstage-community/plugin-tekton","Frontend","3.22.0","Production","./dynamic-plugins/dist/backstage-community-plugin-tekton",";","Disabled"
+"Topology ","@backstage-community/plugin-topology","Frontend","2.0.0","Production","./dynamic-plugins/dist/backstage-community-plugin-topology",";","Disabled"
+"3scale ","@backstage-community/plugin-3scale-backend","Backend","3.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-3scale-backend-dynamic","`THREESCALE_BASE_URL`;`THREESCALE_ACCESS_TOKEN`;","Disabled"
+"ACR ","@backstage-community/plugin-acr","Frontend","1.12.1","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-acr",";","Disabled"
+"Argo CD (Red Hat) ","@backstage-community/plugin-redhat-argocd","Frontend","1.18.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-redhat-argocd",";","Disabled"
"Argo CD ","@roadiehq/scaffolder-backend-argocd","Backend","1.5.0","Red Hat Tech Preview","./dynamic-plugins/dist/roadiehq-scaffolder-backend-argocd-dynamic","`ARGOCD_USERNAME`;`ARGOCD_PASSWORD`;`ARGOCD_INSTANCE1_URL`;`ARGOCD_AUTH_TOKEN`;`ARGOCD_INSTANCE2_URL`;`ARGOCD_AUTH_TOKEN2`;","Disabled"
-"Azure ","@backstage/plugin-scaffolder-backend-module-azure","Backend","0.2.5","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-azure-dynamic",";","Disabled"
-"Azure Devops ","@backstage-community/plugin-azure-devops","Frontend","0.9.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-azure-devops",";","Disabled"
-"Azure Devops ","@backstage-community/plugin-azure-devops-backend","Backend","0.11.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-azure-devops-backend-dynamic","`AZURE_TOKEN`;`AZURE_ORG`;","Disabled"
+"Azure ","@backstage/plugin-scaffolder-backend-module-azure","Backend","0.2.6","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-azure-dynamic",";","Disabled"
+"Azure Devops ","@backstage-community/plugin-azure-devops","Frontend","0.10.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-azure-devops",";","Disabled"
+"Azure Devops ","@backstage-community/plugin-azure-devops-backend","Backend","0.12.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-azure-devops-backend-dynamic","`AZURE_TOKEN`;`AZURE_ORG`;","Disabled"
"Azure Repositories ","@parfuemerie-douglas/scaffolder-backend-module-azure-repositories","Backend","0.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/parfuemerie-douglas-scaffolder-backend-module-azure-repositories-dynamic",";","Disabled"
-"Bitbucket Cloud ","@backstage/plugin-catalog-backend-module-bitbucket-cloud","Backend","0.4.4","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-cloud-dynamic","`BITBUCKET_WORKSPACE`;","Disabled"
-"Bitbucket Cloud ","@backstage/plugin-scaffolder-backend-module-bitbucket-cloud","Backend","0.2.5","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-cloud-dynamic",";","Disabled"
-"Bitbucket Server ","@backstage/plugin-catalog-backend-module-bitbucket-server","Backend","0.3.1","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-server-dynamic","`BITBUCKET_HOST`;","Disabled"
-"Bitbucket Server ","@backstage/plugin-scaffolder-backend-module-bitbucket-server","Backend","0.2.5","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-server-dynamic",";","Disabled"
-"Bulk Import ","@red-hat-developer-hub/backstage-plugin-bulk-import","Frontend","1.11.0","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-bulk-import",";","Disabled"
-"Bulk Import ","@red-hat-developer-hub/backstage-plugin-bulk-import-backend","Backend","5.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-bulk-import-backend-dynamic",";","Disabled"
+"Bitbucket Cloud ","@backstage/plugin-catalog-backend-module-bitbucket-cloud","Backend","0.4.5","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-cloud-dynamic","`BITBUCKET_WORKSPACE`;","Disabled"
+"Bitbucket Cloud ","@backstage/plugin-scaffolder-backend-module-bitbucket-cloud","Backend","0.2.6","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-cloud-dynamic",";","Disabled"
+"Bitbucket Server ","@backstage/plugin-catalog-backend-module-bitbucket-server","Backend","0.3.2","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-bitbucket-server-dynamic","`BITBUCKET_HOST`;","Disabled"
+"Bitbucket Server ","@backstage/plugin-scaffolder-backend-module-bitbucket-server","Backend","0.2.6","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-bitbucket-server-dynamic",";","Disabled"
+"Bulk Import ","@red-hat-developer-hub/backstage-plugin-bulk-import","Frontend","1.12.1","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-bulk-import",";","Disabled"
+"Bulk Import ","@red-hat-developer-hub/backstage-plugin-bulk-import-backend","Backend","6.0.0","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-bulk-import-backend-dynamic",";","Disabled"
"Datadog ","@roadiehq/backstage-plugin-datadog","Frontend","2.4.2","Red Hat Tech Preview","./dynamic-plugins/dist/roadiehq-backstage-plugin-datadog",";","Disabled"
-"Dynatrace ","@backstage-community/plugin-dynatrace","Frontend","10.2.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-dynatrace",";","Disabled"
-"Gerrit ","@backstage/plugin-scaffolder-backend-module-gerrit","Backend","0.2.5","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gerrit-dynamic",";","Disabled"
-"GitHub ","@backstage/plugin-scaffolder-backend-module-github","Backend","0.5.5","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-github-dynamic",";","Disabled"
-"GitHub Actions ","@backstage-community/plugin-github-actions","Frontend","0.7.1","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-github-actions",";","Disabled"
+"Dynatrace ","@backstage-community/plugin-dynatrace","Frontend","10.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-dynatrace",";","Disabled"
+"Gerrit ","@backstage/plugin-scaffolder-backend-module-gerrit","Backend","0.2.6","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gerrit-dynamic",";","Disabled"
+"GitHub Actions ","@backstage-community/plugin-github-actions","Frontend","0.8.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-github-actions",";","Disabled"
"GitHub Insights ","@roadiehq/backstage-plugin-github-insights","Frontend","3.1.3","Red Hat Tech Preview","./dynamic-plugins/dist/roadiehq-backstage-plugin-github-insights",";","Disabled"
-"GitHub Issues ","@backstage-community/plugin-github-issues","Frontend","0.6.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-github-issues",";","Disabled"
+"GitHub Issues ","@backstage-community/plugin-github-issues","Frontend","0.7.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-github-issues",";","Disabled"
"GitHub Pull Requests ","@roadiehq/backstage-plugin-github-pull-requests","Frontend","3.2.1","Red Hat Tech Preview","./dynamic-plugins/dist/roadiehq-backstage-plugin-github-pull-requests",";","Disabled"
"GitLab ","@immobiliarelabs/backstage-plugin-gitlab","Frontend","6.8.0","Red Hat Tech Preview","./dynamic-plugins/dist/immobiliarelabs-backstage-plugin-gitlab",";","Disabled"
-"GitLab ","@backstage/plugin-catalog-backend-module-gitlab","Backend","0.6.2","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-dynamic",";","Disabled"
+"GitLab ","@backstage/plugin-catalog-backend-module-gitlab","Backend","0.6.3","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-dynamic",";","Disabled"
"GitLab ","@immobiliarelabs/backstage-plugin-gitlab-backend","Backend","6.8.0","Red Hat Tech Preview","./dynamic-plugins/dist/immobiliarelabs-backstage-plugin-gitlab-backend-dynamic","`GITLAB_HOST`;`GITLAB_TOKEN`;","Disabled"
-"GitLab ","@backstage/plugin-scaffolder-backend-module-gitlab","Backend","0.7.1","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gitlab-dynamic",";","Disabled"
-"GitLab Org ","@backstage/plugin-catalog-backend-module-gitlab-org","Backend","0.2.5","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-org-dynamic",";","Disabled"
+"GitLab ","@backstage/plugin-scaffolder-backend-module-gitlab","Backend","0.8.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-scaffolder-backend-module-gitlab-dynamic",";","Disabled"
+"GitLab Org ","@backstage/plugin-catalog-backend-module-gitlab-org","Backend","0.2.6","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-gitlab-org-dynamic",";","Disabled"
"Http Request ","@roadiehq/scaffolder-backend-module-http-request","Backend","5.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/roadiehq-scaffolder-backend-module-http-request-dynamic",";","Disabled"
-"Jenkins ","@backstage-community/plugin-jenkins","Frontend","0.16.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-jenkins",";","Disabled"
-"Jenkins ","@backstage-community/plugin-jenkins-backend","Backend","0.11.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-jenkins-backend-dynamic","`JENKINS_URL`;`JENKINS_USERNAME`;`JENKINS_TOKEN`;","Disabled"
-"JFrog Artifactory ","@backstage-community/plugin-jfrog-artifactory","Frontend","1.13.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-jfrog-artifactory",";","Disabled"
+"Jenkins ","@backstage-community/plugin-jenkins","Frontend","0.17.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-jenkins",";","Disabled"
+"Jenkins ","@backstage-community/plugin-jenkins-backend","Backend","0.12.1","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-jenkins-backend-dynamic","`JENKINS_URL`;`JENKINS_USERNAME`;`JENKINS_TOKEN`;","Disabled"
+"JFrog Artifactory ","@backstage-community/plugin-jfrog-artifactory","Frontend","1.13.3","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-jfrog-artifactory",";","Disabled"
"Jira ","@roadiehq/backstage-plugin-jira","Frontend","2.8.2","Red Hat Tech Preview","./dynamic-plugins/dist/roadiehq-backstage-plugin-jira",";","Disabled"
-"Kubernetes ","@backstage/plugin-kubernetes","Frontend","0.12.3","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-kubernetes",";","Disabled"
-"Ldap ","@backstage/plugin-catalog-backend-module-ldap","Backend","0.11.1","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-ldap-dynamic",";","Disabled"
-"Lighthouse ","@backstage-community/plugin-lighthouse","Frontend","0.6.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-lighthouse",";","Disabled"
-"Marketplace ","@red-hat-developer-hub/backstage-plugin-marketplace","Frontend","0.2.0","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-marketplace",";","Enabled"
-"Marketplace ","@red-hat-developer-hub/backstage-plugin-catalog-backend-module-marketplace","Backend","0.2.0","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-catalog-backend-module-marketplace-dynamic",";","Enabled"
-"Marketplace ","@red-hat-developer-hub/backstage-plugin-marketplace-backend","Backend","0.2.0","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-marketplace-backend-dynamic",";","Enabled"
-"MS Graph ","@backstage/plugin-catalog-backend-module-msgraph","Backend","0.6.6","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-msgraph-dynamic",";","Disabled"
-"Nexus Repository Manager ","@backstage-community/plugin-nexus-repository-manager","Frontend","1.12.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-nexus-repository-manager",";","Disabled"
-"Notifications ","@backstage/plugin-notifications","Frontend","0.5.1","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-notifications",";","Disabled"
-"Notifications ","@backstage/plugin-notifications-backend","Backend","0.5.1","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-notifications-backend-dynamic",";","Disabled"
-"Notifications Module Email ","@backstage/plugin-notifications-backend-module-email","Backend","0.3.5","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-notifications-backend-module-email-dynamic","`EMAIL_HOSTNAME`;`EMAIL_USERNAME`;`EMAIL_PASSWORD`;`EMAIL_SENDER`;","Disabled"
+"Kubernetes ","@backstage/plugin-kubernetes","Frontend","0.12.4","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-kubernetes",";","Disabled"
+"Ldap ","@backstage/plugin-catalog-backend-module-ldap","Backend","0.11.2","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-ldap-dynamic",";","Disabled"
+"Lighthouse ","@backstage-community/plugin-lighthouse","Frontend","0.7.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-lighthouse",";","Disabled"
+"Marketplace ","@red-hat-developer-hub/backstage-plugin-marketplace","Frontend","0.5.7","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-marketplace",";","Enabled"
+"Marketplace ","@red-hat-developer-hub/backstage-plugin-catalog-backend-module-marketplace","Backend","0.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-catalog-backend-module-marketplace-dynamic",";","Enabled"
+"Marketplace ","@red-hat-developer-hub/backstage-plugin-marketplace-backend","Backend","0.3.1","Red Hat Tech Preview","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-marketplace-backend-dynamic",";","Enabled"
+"MS Graph ","@backstage/plugin-catalog-backend-module-msgraph","Backend","0.6.7","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-catalog-backend-module-msgraph-dynamic",";","Disabled"
+"Nexus Repository Manager ","@backstage-community/plugin-nexus-repository-manager","Frontend","1.13.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-nexus-repository-manager",";","Disabled"
+"Notifications ","@backstage/plugin-notifications","Frontend","0.5.2","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-notifications",";","Disabled"
+"Notifications ","@backstage/plugin-notifications-backend","Backend","0.5.3","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-notifications-backend-dynamic",";","Disabled"
+"Notifications Module Email ","@backstage/plugin-notifications-backend-module-email","Backend","0.3.6","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-notifications-backend-module-email-dynamic","`EMAIL_HOSTNAME`;`EMAIL_USERNAME`;`EMAIL_PASSWORD`;`EMAIL_SENDER`;","Disabled"
"PagerDuty ","@pagerduty/backstage-plugin","Frontend","0.15.2","Red Hat Tech Preview","./dynamic-plugins/dist/pagerduty-backstage-plugin",";","Disabled"
"PagerDuty ","@pagerduty/backstage-plugin-backend","Backend","0.9.2","Red Hat Tech Preview","./dynamic-plugins/dist/pagerduty-backstage-plugin-backend-dynamic","`PAGERDUTY_API_BASE`;`PAGERDUTY_CLIENT_ID`;`PAGERDUTY_CLIENT_SECRET`;`PAGERDUTY_SUBDOMAIN`;","Disabled"
-"Pingidentity ","@backstage-community/plugin-catalog-backend-module-pingidentity","Backend","0.2.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-pingidentity-dynamic",";","Disabled"
-"Scaffolder Relation Processor ","@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor","Backend","2.2.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-scaffolder-relation-processor-dynamic",";","Disabled"
+"Pingidentity ","@backstage-community/plugin-catalog-backend-module-pingidentity","Backend","0.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-pingidentity-dynamic",";","Disabled"
+"Scaffolder Relation Processor ","@backstage-community/plugin-catalog-backend-module-scaffolder-relation-processor","Backend","2.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-catalog-backend-module-scaffolder-relation-processor-dynamic",";","Disabled"
"Security Insights ","@roadiehq/backstage-plugin-security-insights","Frontend","3.1.2","Red Hat Tech Preview","./dynamic-plugins/dist/roadiehq-backstage-plugin-security-insights",";","Disabled"
-"ServiceNow ","@backstage-community/plugin-scaffolder-backend-module-servicenow","Backend","2.4.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-servicenow-dynamic","`SERVICENOW_BASE_URL`;`SERVICENOW_USERNAME`;`SERVICENOW_PASSWORD`;","Disabled"
-"Signals ","@backstage/plugin-signals","Frontend","0.0.15","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-signals",";","Disabled"
+"ServiceNow ","@backstage-community/plugin-scaffolder-backend-module-servicenow","Backend","2.5.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-servicenow-dynamic","`SERVICENOW_BASE_URL`;`SERVICENOW_USERNAME`;`SERVICENOW_PASSWORD`;","Disabled"
+"Signals ","@backstage/plugin-signals","Frontend","0.0.16","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-plugin-signals",";","Disabled"
"SonarQube ","@backstage-community/plugin-sonarqube","Frontend","0.10.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-sonarqube",";","Disabled"
"SonarQube ","@backstage-community/plugin-sonarqube-backend","Backend","0.5.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-sonarqube-backend-dynamic","`SONARQUBE_URL`;`SONARQUBE_TOKEN`;","Disabled"
-"SonarQube ","@backstage-community/plugin-scaffolder-backend-module-sonarqube","Backend","2.4.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-sonarqube-dynamic",";","Disabled"
-"Tech Radar ","@backstage-community/plugin-tech-radar","Frontend","1.2.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-tech-radar",";","Disabled"
-"Tech Radar ","@backstage-community/plugin-tech-radar-backend","Backend","1.2.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-tech-radar-backend-dynamic","`TECH_RADAR_DATA_URL`;","Disabled"
+"SonarQube ","@backstage-community/plugin-scaffolder-backend-module-sonarqube","Backend","2.5.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-sonarqube-dynamic",";","Disabled"
+"Tech Radar ","@backstage-community/plugin-tech-radar","Frontend","1.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-tech-radar",";","Disabled"
+"Tech Radar ","@backstage-community/plugin-tech-radar-backend","Backend","1.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/backstage-community-plugin-tech-radar-backend-dynamic","`TECH_RADAR_DATA_URL`;","Disabled"
"Utils ","@roadiehq/scaffolder-backend-module-utils","Backend","3.3.0","Red Hat Tech Preview","./dynamic-plugins/dist/roadiehq-scaffolder-backend-module-utils-dynamic",";","Disabled"
-"Argo CD ","@roadiehq/backstage-plugin-argo-cd","Frontend","2.8.4","Community Support","./dynamic-plugins/dist/roadiehq-backstage-plugin-argo-cd",";","Disabled"
-"Global Floating Action Button ","@red-hat-developer-hub/backstage-plugin-global-floating-action-button","Frontend","1.0.0","Community Support","./dynamic-plugins/dist/red-hat-developer-hub-backstage-plugin-global-floating-action-button",";","Enabled"
diff --git a/modules/installation/proc-install-operator.adoc b/modules/installation/proc-install-operator.adoc
index 884b2000fa..6af2071eab 100644
--- a/modules/installation/proc-install-operator.adoc
+++ b/modules/installation/proc-install-operator.adoc
@@ -5,13 +5,7 @@
[id="proc-install-operator_{context}"]
= Installing the {product} Operator
-As an administrator, you can install the {product} Operator. Authorized users can use the Operator to install {product} on the following platforms:
-
-* {ocp-brand-name} ({ocp-short})
-* {eks-brand-name} ({eks-short})
-* {aks-brand-name} ({aks-short})
-
-For more information on {ocp-short} supported versions, see the link:https://access.redhat.com/support/policy/updates/developerhub[{product} Life Cycle].
+As an administrator, you can install the {product} Operator. Authorized users can use the Operator to install {product} on {ocp-brand-name} ({ocp-short}) and supported Kubernetes platforms. For more information on supported platforms and versions, see the link:https://access.redhat.com/support/policy/updates/developerhub[{product} Life Cycle] page.
Containers are available for the following CPU architectures:
@@ -21,7 +15,39 @@ Containers are available for the following CPU architectures:
* You are logged in as an administrator on the {ocp-short} web console.
* You have configured the appropriate roles and permissions within your project to create or access an application. For more information, see the link:https://docs.redhat.com/en/documentation/openshift_container_platform/{ocp-version}/html-single/building_applications/index#building-applications-overview[{ocp-brand-name} documentation on Building applications].
+* You have installed {ocp-brand-name} 4.17 or later.
+
+.Procedure
+. In the *Administrator* perspective of the {ocp-short} web console, click *Operators > OperatorHub*.
+. In the *Filter by keyword* box, enter {product-short} and click the *{product} Operator* card.
+. On the *{product} Operator* page, read the information about the Operator and click *Install* to open the *Install Operator* page.
+. From the *Update channel* drop-down menu, select the update channel that you want to use, for example, *fast* or *fast-{product-version}*.
++
+[IMPORTANT]
+====
+The *fast* channel includes all of the updates available for a particular version. Any update might introduce unexpected changes in your {product} deployment. Check the release notes for details about any potentially breaking changes.
+
+The *fast-{product-version}* channel only provides z-stream updates, for example, updating from version {product-version}.1 to {product-version}.2. If you want to update the {product} y-version in the future, for example, updating from {product-version} to {product-version-next}, you must switch to the *fast-{product-version-next}* channel manually.
+====
+. From the *Version* drop-down menu, select the version of the {product} Operator that you want to install. The default version is the latest version available in the selected channel.
+. Select the Operator *Installation mode*.
+//** If you select the *All namespaces on the cluster* option, the Operator is installed in all namespaces. This option is useful for Operators that manage cluster-wide resources.
+//** If you select the *Specific namespace on the cluster* option, the Operator is installed in a specific namespace. This option is useful for Operators that manage resources in a specific namespace.
++
+[NOTE]
+====
+The *All namespaces on the cluster (default)* option is selected by default. The *Specific namespace on the cluster* option is not currently supported.
+====
+. In the *Installed Namespace* field, do one of the following actions:
++
+** Select *Operator recommended Namespace* to create and use the *rhdh-operator* namespace. This option is selected by default.
+** Select *Select a Namespace* to use an alternative namespace.
+*** From the *Select Project* drop-down menu, do one of the following actions:
+**** Select an existing project.
+**** Select *Create Project* to create a new project for the Operator.
+***** On the *Create Project* dialog, enter text into the required fields and click *Create*.
++
[IMPORTANT]
====
For enhanced security, better control over the Operator lifecycle, and preventing potential privilege escalation, install the {product} Operator in a dedicated default `rhdh-operator` namespace. You can restrict other users' access to the Operator resources through role bindings or cluster role bindings.
@@ -29,34 +55,28 @@ For enhanced security, better control over the Operator lifecycle, and preventin
You can also install the Operator in another namespace by creating the necessary resources, such as an Operator group. For more information, see link:https://docs.redhat.com/en/documentation/openshift_container_platform/{ocp-version}/html-single/operators/index#olm-installing-global-namespaces_olm-adding-operators-to-a-cluster[Installing global Operators in custom namespaces].
However, if the {product} Operator shares a namespace with other Operators, then it shares the same update policy as well, preventing the customization of the update policy. For example, if one Operator is set to manual updates, the {product} Operator update policy is also set to manual. For more information, see link:https://docs.redhat.com/en/documentation/openshift_container_platform/{ocp-version}/html-single/operators/index#olm-colocation-namespaces_olm-colocation[Colocation of Operators in a namespace].
-
====
-
-.Procedure
-
-. In the *Administrator* perspective of the {ocp-short} web console, click *Operators > OperatorHub*.
-. In the *Filter by keyword* box, enter {product-short} and click the *{product}* Operator card.
-. On the *{product} Operator* page, click *Install*.
-
-. On the *Install Operator* page, use the *Update channel* drop-down menu to select the update channel that you want to use:
-** The *fast* channel provides y-stream (x.y) and z-stream (x.y.z) updates, for example, updating from version 1.1 to 1.2, or from 1.1.0 to 1.1.1.
+
-[IMPORTANT]
-====
-The `fast` channel includes all of the updates available for a particular version. Any update might introduce unexpected changes in your {product} deployment. Check the release notes for details about any potentially breaking changes.
+. Select the *Update approval* method for the Operator.
+** If you select the *Automatic* option, the Operator is updated without requiring manual confirmation.
+** If you select the *Manual* option, a notification opens when a new update is released in the update channel. The update must be manually approved by an administrator before installation can begin.
+. Click *Install*.
++
+[NOTE]
====
+If you selected a *Manual* approval strategy, the upgrade status of the subscription remains *Upgrading* until you review and approve the install plan. After you click *Approve* on the *Install Plan* page, the subscription upgrade status changes to *Up to date*.
-** The *fast-1.1* channel only provides z-stream updates, for example, updating from version 1.1.1 to 1.1.2. If you want to update the {product} y-version in the future, for example, updating from 1.1 to 1.2, you must switch to the *fast* channel manually.
-
-. On the *Install Operator* page, choose the *Update approval* strategy for the Operator:
-** If you choose the *Automatic* option, the Operator is updated without requiring manual confirmation.
-** If you choose the *Manual* option, a notification opens when a new update is released in the update channel. The update must be manually approved by an administrator before installation can begin.
-
-. Click *Install*.
+If you selected an *Automatic* approval strategy, the upgrade status should resolve to *Up to date* without intervention.
+====
.Verification
-* To view the installed {product} Operator, click *View Operator*.
+* Immediately after the Operator is installed, the dialog box on the *OperatorHub* page displays the *Installed operator: ready for use* message.
+* From the dialog box, do one of the following actions:
+ ** Click *View Operator* to open the *Operator details* page for the {product} Operator.
+ ** Click *View all installed operators* to open the *Installed Operators* page.
+ *** From the list of installed Operators, locate the {product} Operator name and details.
+ *** Click *{product} Operator* to open the *Operator details* page for the {product} Operator.
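+
+If you have CLI access to the cluster, you can also confirm the installation from a terminal. The following is a minimal sketch that assumes the Operator is installed in the default `rhdh-operator` namespace; a successful installation reports the `Succeeded` phase for the ClusterServiceVersion:
+
+[source,terminal]
+----
+oc get csv -n rhdh-operator
+oc get pods -n rhdh-operator
+----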
[role="_additional-resources"]
.Additional resources
diff --git a/modules/installation/proc-install-rhdh-airgapped-full-k8s-helm.adoc b/modules/installation/proc-install-rhdh-airgapped-full-k8s-helm.adoc
new file mode 100644
index 0000000000..6220f9882b
--- /dev/null
+++ b/modules/installation/proc-install-rhdh-airgapped-full-k8s-helm.adoc
@@ -0,0 +1,234 @@
+[id="proc-install-rhdh-airgapped-full-k8s-helm_{context}"]
+= Installing {product} on a supported Kubernetes platform in a fully disconnected environment with the Helm chart
+
+In environments without internet access, a fully disconnected installation ensures that {product} can run reliably without external dependencies. This approach involves mirroring images and transferring them manually to the air-gapped environment.
+
+.Prerequisites
+
+* You have installed Skopeo 1.17 or later.
+* You have installed yq 4.4 or later.
+* You have authenticated to `registry.redhat.io` for pulling images by using the `skopeo login` command.
+* You have access to the Kubernetes cluster with `kubectl` configured.
+* You have installed Helm 3.13 or later on the air-gapped host.
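+
+To confirm that the required tools are available at the expected versions, you can run their standard version commands, for example:
+
+[source,terminal]
+----
+skopeo --version
+yq --version
+helm version
+kubectl version --client
+----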
+
+.Procedure
+
+. On the mirroring host, in a terminal, fetch the Helm chart values by running the following commands:
++
+[source,terminal,subs="+quotes"]
+----
+helm repo add __ https://charts.openshift.io/
+helm repo update
+helm show values __/redhat-developer-hub --version __ > values.default.yaml
+helm pull __/redhat-developer-hub --version __
+----
++
+where
+
+__ :: Specifies the name of the Helm chart repository, for example, `openshift-helm-charts`.
+__ :: Specifies the {product} version that you want to use, for example, `{product-chart-version}`.
++
+[NOTE]
+====
+The `helm pull __/redhat-developer-hub --version __` command downloads the Helm chart as an archive file to your current working directory.
+====
++
+. Extract the image references and digests from the default values by running the following commands:
++
+[source,terminal,subs="+quotes"]
+----
+RHDH_IMAGE=$(yq '.upstream.backstage.image | .registry + "/" + .repository' values.default.yaml)
+RHDH_DIGEST=$(yq '.upstream.backstage.image.tag' values.default.yaml)
+PG_IMAGE=$(yq '.upstream.postgresql.image | .registry + "/" + .repository' values.default.yaml)
+PG_DIGEST=$(yq '.upstream.postgresql.image.tag' values.default.yaml)
+----
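++
+Optionally, you can confirm the extracted values before mirroring, for example:
++
+[source,terminal]
+----
+echo "${RHDH_IMAGE}:${RHDH_DIGEST}"
+echo "${PG_IMAGE}:${PG_DIGEST}"
+----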
+. Mirror the images to your local archive by running the following commands:
++
+[source,terminal,subs="+quotes"]
+----
+skopeo login registry.redhat.io
+skopeo copy --all docker://${RHDH_IMAGE}:${RHDH_DIGEST} dir:./rhdh-hub
+skopeo copy --all docker://${PG_IMAGE}:${PG_DIGEST} dir:./postgresql
+----
+
+. Transfer the following files and directories to your air-gapped environment:
++
+* rhdh-hub
+* postgresql
+* Helm chart archive file, for example, `redhat-developer-hub-{product-chart-version}.tgz`
++
+. Load the images onto the air-gapped host by running the following commands:
++
+[source,terminal,subs="+quotes"]
+----
+skopeo copy --all dir:./rhdh-hub docker://__/__:${RHDH_DIGEST}
+
+skopeo copy --all dir:./postgresql docker://__/__:${PG_DIGEST}
+----
++
+where
+
+__ :: Specifies the name of the target mirror registry that you want to push the images to, for example, `registry.example.com`.
+
+__ :: Specifies the name of the repository where your {product} image is stored, for example, `rhdh/rhdh-hub-rhel9`. This value must match the name of the {product} image that you loaded onto the air-gapped host.
+
+__ :: Specifies the name of the repository where your PostgreSQL image is stored, for example, `rhdh/postgresql-15`.
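+
+For example, with the example mirror registry `registry.example.com` and the repository names shown above, the commands resemble the following:
+
+[source,terminal]
+----
+skopeo copy --all dir:./rhdh-hub docker://registry.example.com/rhdh/rhdh-hub-rhel9:${RHDH_DIGEST}
+
+skopeo copy --all dir:./postgresql docker://registry.example.com/rhdh/postgresql-15:${PG_DIGEST}
+----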
++
+. Create a `values.yaml` file for the Kubernetes platform that you want to use and add the following image references to the file to reflect local use:
++
+[source,yaml,subs="+quotes"]
+----
+upstream:
+ backstage:
+ image:
+ registry: "__"
+ repository: __
+ tag: "${RHDH_DIGEST}"
+
+ postgresql:
+ image:
+ registry: "__"
+ repository: __
+ tag: "${PG_DIGEST}"
+
+----
++
+where
+
+__ :: Specifies the name of the target mirror registry that you want to push the images to, for example, `registry.example.com`.
+
+__ :: Specifies the name of the repository where your {product} image is stored, for example, `rhdh/rhdh-hub-rhel9`. This value must match the name of the {product} image that you loaded onto the air-gapped host.
+
+__ :: Specifies the name of the repository where your PostgreSQL image is stored, for example, `rhdh/postgresql-15`.
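+
+Helm does not expand shell variables inside `values.yaml`. As a minimal sketch, assuming the example mirror registry and repository names above, you can substitute the digest values while creating the file by using a shell here-document, and then extend the file with the platform-specific settings shown below:
+
+[source,terminal]
+----
+cat > values.yaml <<EOF
+upstream:
+  backstage:
+    image:
+      registry: "registry.example.com"
+      repository: rhdh/rhdh-hub-rhel9
+      tag: "${RHDH_DIGEST}"
+  postgresql:
+    image:
+      registry: "registry.example.com"
+      repository: rhdh/postgresql-15
+      tag: "${PG_DIGEST}"
+EOF
+----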
++
+* For {aks-short}, use the following `values.yaml` file template:
++
+[source,yaml,subs="+quotes"]
+----
+global:
+ host:
+route:
+ enabled: false
+upstream:
+ ingress:
+ enabled: true
+ className: webapprouting.kubernetes.azure.com
+ host:
+ backstage:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ podSecurityContext:
+ fsGroup: 3000
+ postgresql:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ primary:
+ podSecurityContext:
+ enabled: true
+ fsGroup: 3000
+ volumePermissions:
+ enabled: true
+----
++
+* For {eks-short}, use the following `values.yaml` file template:
++
+[source,yaml,subs="+quotes"]
+----
+global:
+ # TODO: Set your application domain name.
+ host:
+
+route:
+ enabled: false
+
+upstream:
+ service:
+ # NodePort is required for the ALB to route to the Service
+ type: NodePort
+
+ ingress:
+ enabled: true
+ annotations:
+ kubernetes.io/ingress.class: alb
+
+ alb.ingress.kubernetes.io/scheme: internet-facing
+
+ # TODO: Using an ALB HTTPS Listener requires a certificate for your own domain. Fill in the ARN of your certificate, e.g.:
+ alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:xxx:xxxx:certificate/xxxxxx
+
+ alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
+
+ alb.ingress.kubernetes.io/ssl-redirect: '443'
+
+ # TODO: Set your application domain name.
+ external-dns.alpha.kubernetes.io/hostname:
+
+ backstage:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ podSecurityContext:
+ # you can assign any random value as fsGroup
+ fsGroup: 2000
+ postgresql:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ primary:
+ podSecurityContext:
+ enabled: true
+ # you can assign any random value as fsGroup
+ fsGroup: 3000
+ volumePermissions:
+ enabled: true
+----
++
+* For {gke-short}, use the following `values.yaml` file template:
++
+[source,yaml,subs="+quotes"]
+----
+global:
+ host:
+route:
+ enabled: false
+upstream:
+ service:
+ type: NodePort
+ ingress:
+ enabled: true
+ annotations:
+ kubernetes.io/ingress.class: gce
+ kubernetes.io/ingress.global-static-ip-name:
+ networking.gke.io/managed-certificates:
+ networking.gke.io/v1beta1.FrontendConfig:
+ className: gce
+ backstage:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ podSecurityContext:
+ fsGroup: 2000
+ postgresql:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ primary:
+ podSecurityContext:
+ enabled: true
+ fsGroup: 3000
+ volumePermissions:
+ enabled: true
+----
++
+. Install the Helm chart in the current namespace by running the following command:
++
+[source,terminal,subs="+quotes"]
+----
+helm install rhdh ./__ -f values.yaml
+----
++
+where
+
+__ :: Specifies the name of the Helm chart archive file, for example, `redhat-developer-hub-{product-chart-version}.tgz`.
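+
+For example, with the chart archive that you transferred earlier and the example file name above, the command resembles the following:
+
+[source,terminal,subs="attributes+"]
+----
+helm install rhdh ./redhat-developer-hub-{product-chart-version}.tgz -f values.yaml
+----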
diff --git a/modules/installation/proc-install-rhdh-airgapped-partial-k8s-helm.adoc b/modules/installation/proc-install-rhdh-airgapped-partial-k8s-helm.adoc
new file mode 100644
index 0000000000..9d2a67bf7c
--- /dev/null
+++ b/modules/installation/proc-install-rhdh-airgapped-partial-k8s-helm.adoc
@@ -0,0 +1,215 @@
+[id="proc-install-rhdh-airgapped-partial-k8s-helm_{context}"]
+= Installing {product} on a supported Kubernetes platform in a partially disconnected environment with the Helm chart
+
+In a partially disconnected environment, the cluster cannot access external registries, for example, registry.redhat.io, but it can access an internal mirror registry. This method requires direct access to an internal mirror registry from the cluster.
+
+.Prerequisites
+
+* You have set up your workstation:
+** You have installed Skopeo 1.17 or later.
+** You have installed `yq` 4.4 or later.
+** You have installed Helm 3.13 or later.
+** You have an active Skopeo session against `registry.redhat.io`.
+** You have an active Skopeo session against your target mirror registry, for example, `registry.internal.example.com`.
+** You have access to the Kubernetes cluster with `kubectl` configured.
+
+.Procedure
+
+. In a terminal, download and extract the Helm chart by running the following commands:
++
+[source,terminal,subs="attributes+"]
+----
+helm repo add __ https://charts.openshift.io/
+helm repo update
+helm pull __/redhat-developer-hub --version __
+helm show values __/redhat-developer-hub --version __ > values.default.yaml
+----
++
+where
+
+__ :: Specifies the name of the Helm chart repository, for example, `openshift-helm-charts`.
+__ :: Specifies the {product} version that you want to use, for example, `{product-chart-version}`.
++
+. Use `yq` to extract the image digests by running the following commands:
++
+[source,terminal,subs="attributes+"]
+----
+RHDH_IMAGE=$(yq '.upstream.backstage.image | .registry + "/" + .repository' values.default.yaml)
+RHDH_DIGEST=$(yq '.upstream.backstage.image.tag' values.default.yaml)
+PG_IMAGE=$(yq '.upstream.postgresql.image | .registry + "/" + .repository' values.default.yaml)
+PG_DIGEST=$(yq '.upstream.postgresql.image.tag' values.default.yaml)
+----
+. Mirror the images to the internal mirror registry by entering the following commands:
++
+[source,terminal,subs="attributes+"]
+----
+skopeo login registry.redhat.io
+
+skopeo login __
+
+skopeo copy --remove-signatures \
+ docker://${PG_IMAGE}@${PG_DIGEST} \
+ docker://__/__:${PG_DIGEST}
+
+skopeo copy --remove-signatures \
+ docker://${RHDH_IMAGE}@${RHDH_DIGEST} \
+ docker://__/__:${RHDH_DIGEST}
+----
++
+where
+
+__ :: Specifies the name of the internal mirror registry, for example, `registry.internal.example.com`.
+
+__ :: Specifies the name of the PostgreSQL repository, for example, `rhdh/postgresql-15`.
+
+__ :: Specifies the name of the {product} repository, for example, `rhdh/rhdh-hub-rhel9`.
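+
+As an optional check before installing the chart, you can confirm that the mirrored images are reachable in the internal mirror registry; a sketch, assuming the example registry and repository names above:
+
+[source,terminal]
+----
+skopeo inspect docker://registry.internal.example.com/rhdh/rhdh-hub-rhel9:${RHDH_DIGEST}
+skopeo inspect docker://registry.internal.example.com/rhdh/postgresql-15:${PG_DIGEST}
+----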
++
+. Create a `values.yaml` file for the Kubernetes platform that you want to use and add the following image references to the file to reflect local use:
++
+[source,yaml,subs="attributes+"]
+----
+upstream:
+ backstage:
+ image:
+ registry: "__"
+ repository: __
+ tag: "${RHDH_DIGEST}"
+
+ postgresql:
+ image:
+ registry: "__"
+ repository: __
+ tag: "${PG_DIGEST}"
+----
++
+* For {aks-short}, use the following `values.yaml` file template:
++
+[source,yaml,subs="+quotes"]
+----
+global:
+ host:
+route:
+ enabled: false
+upstream:
+ ingress:
+ enabled: true
+ className: webapprouting.kubernetes.azure.com
+ host:
+ backstage:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ podSecurityContext:
+ fsGroup: 3000
+ postgresql:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ primary:
+ podSecurityContext:
+ enabled: true
+ fsGroup: 3000
+ volumePermissions:
+ enabled: true
+----
++
+* For {eks-short}, use the following `values.yaml` file template:
++
+[source,yaml,subs="+quotes"]
+----
+global:
+ # TODO: Set your application domain name.
+ host:
+
+route:
+ enabled: false
+
+upstream:
+ service:
+ # NodePort is required for the ALB to route to the Service
+ type: NodePort
+
+ ingress:
+ enabled: true
+ annotations:
+ kubernetes.io/ingress.class: alb
+
+ alb.ingress.kubernetes.io/scheme: internet-facing
+
+ # TODO: Using an ALB HTTPS Listener requires a certificate for your own domain. Fill in the ARN of your certificate, e.g.:
+ alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:xxx:xxxx:certificate/xxxxxx
+
+ alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]'
+
+ alb.ingress.kubernetes.io/ssl-redirect: '443'
+
+ # TODO: Set your application domain name.
+ external-dns.alpha.kubernetes.io/hostname:
+
+ backstage:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ podSecurityContext:
+ # you can assign any random value as fsGroup
+ fsGroup: 2000
+ postgresql:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ primary:
+ podSecurityContext:
+ enabled: true
+ # you can assign any random value as fsGroup
+ fsGroup: 3000
+ volumePermissions:
+ enabled: true
+----
++
+* For {gke-short}, use the following `values.yaml` file template:
++
+[source,yaml,subs="+quotes"]
+----
+global:
+ host:
+route:
+ enabled: false
+upstream:
+ service:
+ type: NodePort
+ ingress:
+ enabled: true
+ annotations:
+ kubernetes.io/ingress.class: gce
+ kubernetes.io/ingress.global-static-ip-name:
+ networking.gke.io/managed-certificates:
+ networking.gke.io/v1beta1.FrontendConfig:
+ className: gce
+ backstage:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ podSecurityContext:
+ fsGroup: 2000
+ postgresql:
+ image:
+ pullSecrets:
+ - rhdh-pull-secret
+ primary:
+ podSecurityContext:
+ enabled: true
+ fsGroup: 3000
+ volumePermissions:
+ enabled: true
+----
++
+. Install the Helm chart in the current namespace by running the following command:
++
+[source,terminal,subs="attributes+"]
+----
+helm install rhdh ./__ -f values.yaml
+----
++
+where
+
+__ :: Specifies the name of the Helm chart archive file, for example, `redhat-developer-hub-{product-chart-version}.tgz`.
diff --git a/modules/installation/proc-rhdh-deploy-eks-operator.adoc b/modules/installation/proc-rhdh-deploy-eks-operator.adoc
index 0e4c0eed94..22671c3d97 100644
--- a/modules/installation/proc-rhdh-deploy-eks-operator.adoc
+++ b/modules/installation/proc-rhdh-deploy-eks-operator.adoc
@@ -2,14 +2,8 @@
// assembly-install-rhdh-eks.adoc
[id='proc-rhdh-deploy-eks-operator_{context}']
-= Installing {product-short} on {eks-short} with the Operator
-The {product} Operator installation requires the Operator Lifecycle Manager (OLM) framework.
-
-.Additional resources
-* For information about the OLM, see link:https://olm.operatorframework.io/docs/[Operator Lifecycle Manager(OLM)] documentation.
-
-== Installing the {product-short} Operator with the OLM framework
+= Installing the {product-short} Operator with the OLM framework
You can install the {product-short} Operator on {eks-short} using the https://olm.operatorframework.io[Operator Lifecycle Manager (OLM) framework]. Following that, you can proceed to deploy your {product-short} instance in {eks-short}.
diff --git a/modules/observe/adoption-insights/con-about-adoption-insights.adoc b/modules/observe/adoption-insights/con-about-adoption-insights.adoc
index 21e4bb2112..5cec347e60 100644
--- a/modules/observe/adoption-insights/con-about-adoption-insights.adoc
+++ b/modules/observe/adoption-insights/con-about-adoption-insights.adoc
@@ -6,6 +6,11 @@ As organizations generate an increasing number of data events, there is a growin
You can use Adoption Insights in {product} to visualize key metrics and trends to get information about the usage of {product-short} in your organization. The information provided by Adoption Insights in {product-short} pinpoints areas of improvement, highlights popular features, and evaluates progress toward adoption goals. You can also monitor user growth against license users and identify trends over time.
+[NOTE]
+====
+Currently, the Adoption Insights plugin cannot be used alongside the built-in `plugin-analytics-provider-segment` plugin. For a workaround, see link:{release-notes-book-url}#developer-preview-rhdhpai-510[Adoption Insights in {product}].
+====
+
The Adoption Insights dashboard in {product-short} includes the following cards:
* *Active users*
diff --git a/modules/observe/adoption-insights/proc-configure-adoption-insights.adoc b/modules/observe/adoption-insights/proc-configure-adoption-insights.adoc
index 94326b0f50..d49a5bbb04 100644
--- a/modules/observe/adoption-insights/proc-configure-adoption-insights.adoc
+++ b/modules/observe/adoption-insights/proc-configure-adoption-insights.adoc
@@ -11,7 +11,7 @@ You can enable the Adoption Insights plugin by configuring the {product} Helm ch
.`app-config.yaml` fragment
[source,terminal]
----
- App:
+app:
analytics:
adoptionInsights:
maxBufferSize: 20 <1>
diff --git a/modules/observe/adoption-insights/proc-install-adoption-insights.adoc b/modules/observe/adoption-insights/proc-install-adoption-insights.adoc
index b53acc5a57..45f9f35cd3 100644
--- a/modules/observe/adoption-insights/proc-install-adoption-insights.adoc
+++ b/modules/observe/adoption-insights/proc-install-adoption-insights.adoc
@@ -14,35 +14,44 @@ For the {product} Adoption Insights plugin, you must manually install the plugin
+
[source,yaml]
----
-- package: oci://quay.io/__/adoption-insights:latest!red-hat-developer-hub-backstage-plugin-adoption-insights
- disabled: false
- pluginConfig:
- dynamicPlugins:
- frontend:
- red-hat-developer-hub.backstage-plugin-adoption-insights:
- appIcons:
- - name: adoptionInsightsIcon
- importName: AdoptionInsightsIcon
- dynamicRoutes:
- - path: /adoption-insights
- importName: AdoptionInsightsPage
- menuItem:
+ - package: ./dynamic-plugins/dist/backstage-community-plugin-analytics-provider-segment
+ disabled: true
+ - package: oci://ghcr.io/redhat-developer/rhdh-plugin-export-overlays/red-hat-developer-hub-backstage-plugin-adoption-insights:bs_1.35.1__0.0.3!red-hat-developer-hub-backstage-plugin-adoption-insights
+ disabled: false
+ pluginConfig:
+ dynamicPlugins:
+ frontend:
+ red-hat-developer-hub.backstage-plugin-adoption-insights:
+ appIcons:
+ - name: adoptionInsightsIcon
+ importName: AdoptionInsightsIcon
+ dynamicRoutes:
+ - path: /adoption-insights
+ importName: AdoptionInsightsPage
+ menuItem:
+ icon: adoptionInsightsIcon
+ text: Adoption Insights
+ menuItems:
+ adoption-insights:
+ parent: admin
icon: adoptionInsightsIcon
- text: Adoption Insights
- menuItems:
- adoption-insights:
- parent: admin
- icon: adoptionInsightsIcon
-
-- package: oci://quay.io/__/adoption-insights:latest!red-hat-developer-hub-backstage-plugin-adoption-insights-backend-dynamic
- disabled: false
+ - package: oci://ghcr.io/redhat-developer/rhdh-plugin-export-overlays/red-hat-developer-hub-backstage-plugin-adoption-insights-backend:bs_1.35.1__0.0.4!red-hat-developer-hub-backstage-plugin-adoption-insights-backend
+ disabled: false
+ - package: oci://ghcr.io/redhat-developer/rhdh-plugin-export-overlays/red-hat-developer-hub-backstage-plugin-analytics-module-adoption-insights:bs_1.35.1__0.0.2!red-hat-developer-hub-backstage-plugin-analytics-module-adoption-insights
+ disabled: false
+ pluginConfig:
+ dynamicPlugins:
+ frontend:
+ red-hat-developer-hub.backstage-plugin-analytics-module-adoption-insights:
+ apiFactories:
+ - importName: AdoptionInsightsAnalyticsApiFactory
+----
-- package: oci://quay.io/__/adoption-insights:latest!red-hat-developer-hub-backstage-plugin-analytics-module-adoption-insights
- disabled: false
- pluginConfig:
- dynamicPlugins:
- frontend:
- red-hat-developer-hub.backstage-plugin-analytics-module-adoption-insights:
- apiFactories:
- - importName: AdoptionInsightsAnalyticsApiFactory
-----
\ No newline at end of file
+. Optional: Configure the required RBAC permission for users who are not administrators, as shown in the following example:
++
+[source,yaml]
+----
+p, role:default/__, adoption-insights.events.read, read, allow
+g, user:default/__, role:default/__
+----
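+
+For example, assuming a hypothetical role named `adoption-insights-viewer` and a user named `jdoe`, the entries resemble the following:
+
+[source,csv]
+----
+p, role:default/adoption-insights-viewer, adoption-insights.events.read, read, allow
+g, user:default/jdoe, role:default/adoption-insights-viewer
+----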
+See link:{authorization-book-url}#ref-rbac-permission-policies_title-authorization[Permission policies in {product}].
diff --git a/modules/observe/proc-audit-log-view.adoc b/modules/observe/proc-audit-log-view.adoc
index 3a77b8249e..2fd0ddf1f3 100644
--- a/modules/observe/proc-audit-log-view.adoc
+++ b/modules/observe/proc-audit-log-view.adoc
@@ -5,7 +5,7 @@
[id="proc-audit-log-view_{context}"]
= Viewing audit logs in {product-short}
-Administrators can view, search, filter, and manage the log data from the {ocp-brand-name} web console. You can filter audit logs from other log types by using the `isAuditLog` field.
+Administrators can view, search, filter, and manage the log data from the {ocp-brand-name} web console. You can filter audit logs from other log types by using the `isAuditEvent` field.
.Prerequisites
* You are logged in as an administrator in the {ocp-short} web console.
@@ -16,4 +16,4 @@ Administrators can view, search, filter, and manage the log data from the {ocp-b
. From the *Topology* view, click the pod that you want to view audit log data for.
. From the pod panel, click the *Resources* tab.
. From the *Pods* section of the *Resources* tab, click *View logs*.
-. From the *Logs* view, enter `isAuditLog` into the *Search* field to filter audit logs from other log types. You can use the arrows to browse the logs containing the `isAuditLog` field.
+. From the *Logs* view, enter `isAuditEvent` into the *Search* field to filter audit logs from other log types. You can use the arrows to browse the logs containing the `isAuditEvent` field.
diff --git a/modules/observe/proc-configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-helm-chart.adoc b/modules/observe/proc-configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-helm-chart.adoc
new file mode 100644
index 0000000000..14ba807798
--- /dev/null
+++ b/modules/observe/proc-configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-helm-chart.adoc
@@ -0,0 +1,34 @@
+[id="configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-helm-chart_{context}"]
+= Configuring annotations for monitoring with Amazon Prometheus by using the {product} Helm chart
+
+To enable logging to Amazon Prometheus, you can configure the required pod annotations by using the {product} Helm chart.
+
+.Procedure
+* To annotate the backstage pod for monitoring, update your `values.yaml` file as follows:
++
+[source,yaml]
+----
+upstream:
+ backstage:
+ # --- TRUNCATED ---
+ podAnnotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/path: '/metrics'
+ prometheus.io/port: '9464'
+ prometheus.io/scheme: 'http'
+----
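+
+Optionally, you can confirm that the application exposes metrics on the annotated port before configuring scraping. The following is a sketch, assuming your {product-short} Deployment is named `redhat-developer-hub`; run the `curl` command in a second terminal:
+
+[source,terminal]
+----
+kubectl port-forward deploy/redhat-developer-hub 9464:9464
+curl -s http://localhost:9464/metrics | head
+----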
+
+.Verification
+To verify if the scraping works:
+
+. Use `kubectl` to port-forward the Prometheus console to your local machine as follows:
++
+[source,bash]
+----
+kubectl --namespace=prometheus port-forward deploy/prometheus-server 9090
+----
+
+. Open your web browser and navigate to `pass:c[http://localhost:9090]` to access the Prometheus console.
+
+. Monitor relevant metrics, such as `process_cpu_user_seconds_total`.
+
diff --git a/modules/observe/proc-configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-operator.adoc b/modules/observe/proc-configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-operator.adoc
new file mode 100644
index 0000000000..2caaeb535b
--- /dev/null
+++ b/modules/observe/proc-configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-operator.adoc
@@ -0,0 +1,51 @@
+[id="configuring-annotations-for-monitoring-with-amazon-prometheus-by-using-the-operator_{context}"]
+= Configuring annotations for monitoring with Amazon Prometheus by using the {product} Operator
+
+To enable logging to Amazon Prometheus, you can configure the required pod annotations by using the {product} Operator.
+
+.Procedure
+. As an administrator of the {product} Operator, edit the default configuration to add Prometheus annotations as follows:
++
+[source,terminal]
+----
+# Update OPERATOR_NS accordingly
+$ OPERATOR_NS=rhdh-operator
+$ kubectl edit configmap backstage-default-config -n "${OPERATOR_NS}"
+----
+
+. Find the `deployment.yaml` key in the config map and add the annotations to the `spec.template.metadata.annotations` field as follows:
++
+[source,yaml]
+----
+deployment.yaml: |-
+ apiVersion: apps/v1
+ kind: Deployment
+ # --- truncated ---
+ spec:
+ template:
+ # --- truncated ---
+ metadata:
+ labels:
+ rhdh.redhat.com/app: # placeholder for 'backstage-'
+ # --- truncated ---
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/path: '/metrics'
+ prometheus.io/port: '9464'
+ prometheus.io/scheme: 'http'
+ # --- truncated ---
+----
+
+. Save your changes.
+
+.Verification
+To verify if the scraping works:
+
+. Use `kubectl` to port-forward the Prometheus console to your local machine as follows:
++
+[source,terminal]
+----
+$ kubectl --namespace=prometheus port-forward deploy/prometheus-server 9090
+----
+
+. Open your web browser and navigate to `pass:c[http://localhost:9090]` to access the Prometheus console.
+. Monitor relevant metrics, such as `process_cpu_user_seconds_total`.
+
diff --git a/modules/observe/proc-configuring-the-application-log-level-for-logging-with-amazon-cloudwatch-logs-by-using-the-helm-chart.adoc b/modules/observe/proc-configuring-the-application-log-level-for-logging-with-amazon-cloudwatch-logs-by-using-the-helm-chart.adoc
new file mode 100644
index 0000000000..40349b7a1b
--- /dev/null
+++ b/modules/observe/proc-configuring-the-application-log-level-for-logging-with-amazon-cloudwatch-logs-by-using-the-helm-chart.adoc
@@ -0,0 +1,17 @@
+[id="configuring-the-application-log-level-by-using-the-helm-chart_{context}"]
+= Configuring the application log level by using the {product} Helm chart
+
+You can configure the application log level by using the {product} Helm chart.
+
+.Procedure
+* Modify the logging level by adding the environment variable `LOG_LEVEL` to your Helm chart `values.yaml` file:
++
+[source,yaml]
+----
+upstream:
+ backstage:
+ # --- Truncated ---
+ extraEnvVars:
+ - name: LOG_LEVEL
+ value: debug
+----
diff --git a/modules/observe/proc-configuring-the-application-log-level-for-logging-with-amazon-cloudwatch-logs-by-using-the-operator.adoc b/modules/observe/proc-configuring-the-application-log-level-for-logging-with-amazon-cloudwatch-logs-by-using-the-operator.adoc
new file mode 100644
index 0000000000..7a6ea6a531
--- /dev/null
+++ b/modules/observe/proc-configuring-the-application-log-level-for-logging-with-amazon-cloudwatch-logs-by-using-the-operator.adoc
@@ -0,0 +1,18 @@
+[id="configuring-the-application-log-level-by-using-the-operator_{context}"]
+= Configuring the application log level by using the {product} Operator
+
+You can configure the application log level by using the {product} Operator.
+
+.Procedure
+* Modify the logging level by including the environment variable `LOG_LEVEL` in your custom resource as follows:
++
+[source,yaml]
+----
+spec:
+ # Other fields omitted
+ application:
+ extraEnvs:
+ envs:
+ - name: LOG_LEVEL
+ value: debug
+----
diff --git a/modules/observe/proc-forward-audit-log-splunk.adoc b/modules/observe/proc-forward-audit-log-splunk.adoc
index cfa5272d19..f3ebe394d4 100644
--- a/modules/observe/proc-forward-audit-log-splunk.adoc
+++ b/modules/observe/proc-forward-audit-log-splunk.adoc
@@ -130,7 +130,7 @@ filters:
drop:
- test:
- field: .message
- notMatches: isAuditLog
+ notMatches: isAuditEvent
----
For more information, see link:https://docs.redhat.com/en/documentation/openshift_container_platform/4.16/html-single/logging/index#logging-content-filtering[Filtering logs by content] in {ocp-short} documentation.
--
diff --git a/modules/observe/proc-retrieving-logs-from-amazon-cloudwatch.adoc b/modules/observe/proc-retrieving-logs-from-amazon-cloudwatch.adoc
new file mode 100644
index 0000000000..e1eb6c37ae
--- /dev/null
+++ b/modules/observe/proc-retrieving-logs-from-amazon-cloudwatch.adoc
@@ -0,0 +1,25 @@
+[id="retrieving-logs-from-amazon-cloudwatch_{context}"]
+= Retrieving logs from Amazon CloudWatch
+
+.Prerequisites
+* CloudWatch Container Insights is used to capture logs and metrics for {eks-brand-name}.
+For more information, see https://docs.aws.amazon.com/prescriptive-guidance/latest/implementing-logging-monitoring-cloudwatch/kubernetes-eks-logging.html[Logging for {eks-brand-name}] documentation.
+
+* To capture the logs and metrics, link:https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-EKS-addon.html[install the Amazon CloudWatch Observability EKS add-on] in your cluster.
+Following the setup of Container Insights, you can access container logs using Logs Insights or Live Tail views.
+
+* CloudWatch names the log group where all container logs are consolidated in the following manner:
++
+[subs="+quotes"]
+----
+/aws/containerinsights/__/application
+----
+
+.Procedure
+* To retrieve logs from the {product-short} instance, run a query such as:
++
+[source,sql]
+----
+fields @timestamp, @message, kubernetes.container_name
+| filter kubernetes.container_name in ["install-dynamic-plugins", "backstage-backend"]
+----
diff --git a/modules/observe/proc-rhdh-monitoring-logging-aws.adoc b/modules/observe/proc-rhdh-monitoring-logging-aws.adoc
deleted file mode 100644
index 822688a08e..0000000000
--- a/modules/observe/proc-rhdh-monitoring-logging-aws.adoc
+++ /dev/null
@@ -1,149 +0,0 @@
-[id='proc-rhdh-monitoring-logging-aws_{context}']
-= Monitoring and logging with Amazon Web Services (AWS) in {product}
-
-In the {product}, monitoring and logging are facilitated through Amazon Web Services (AWS) integration. With features like Amazon CloudWatch for real-time monitoring and Amazon Prometheus for comprehensive logging, you can ensure the reliability, scalability, and compliance of your {product-short} application hosted on AWS infrastructure.
-
-This integration enables you to oversee, diagnose, and refine your applications in the Red Hat ecosystem, leading to an improved development and operational journey.
-
-== Monitoring with Amazon Prometheus
-
-{product} provides Prometheus metrics related to the running application. For more information about enabling or deploying Prometheus for EKS clusters, see https://docs.aws.amazon.com/eks/latest/userguide/prometheus.html[Prometheus metrics] in the Amazon documentation.
-
-To monitor {product-short} using https://aws.amazon.com/prometheus/[Amazon Prometheus], you need to create an Amazon managed service for the Prometheus workspace and configure the ingestion of the Developer Hub Prometheus metrics. For more information, see https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-create-workspace.html[Create a workspace] and https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-onboard-ingest-metrics.html[Ingest Prometheus metrics to the workspace] sections in the Amazon documentation.
-
-After ingesting Prometheus metrics into the created workspace, you can configure the metrics scraping to extract data from pods based on specific pod annotations.
-
-=== Configuring annotations for monitoring
-
-You can configure the annotations for monitoring in both Helm deployment and Operator-backed deployment.
-
-Helm deployment::
-+
---
-To annotate the backstage pod for monitoring, update your `values.yaml` file as follows:
-
-[source,yaml]
-----
-upstream:
- backstage:
- # --- TRUNCATED ---
- podAnnotations:
- prometheus.io/scrape: 'true'
- prometheus.io/path: '/metrics'
- prometheus.io/port: '9464'
- prometheus.io/scheme: 'http'
-----
---
-
-Operator-backed deployment::
-+
---
-.Procedure
-
-. As an administrator of the operator, edit the default configuration to add Prometheus annotations as follows:
-+
-[source,bash]
-----
-# Update OPERATOR_NS accordingly
-OPERATOR_NS=rhdh-operator
-kubectl edit configmap backstage-default-config -n "${OPERATOR_NS}"
-----
-
-. Find the `deployment.yaml` key in the ConfigMap and add the annotations to the `spec.template.metadata.annotations` field as follows:
-+
-[source,yaml]
-----
-deployment.yaml: |-
- apiVersion: apps/v1
- kind: Deployment
- # --- truncated ---
- spec:
- template:
- # --- truncated ---
- metadata:
- labels:
- rhdh.redhat.com/app: # placeholder for 'backstage-'
- # --- truncated ---
- annotations:
- prometheus.io/scrape: 'true'
- prometheus.io/path: '/metrics'
- prometheus.io/port: '9464'
- prometheus.io/scheme: 'http'
- # --- truncated ---
-----
-
-. Save your changes.
---
-
-.Verification
-
-To verify if the scraping works:
-
-. Use `kubectl` to port-forward the Prometheus console to your local machine as follows:
-+
-[source,bash]
-----
-kubectl --namespace=prometheus port-forward deploy/prometheus-server 9090
-----
-
-. Open your web browser and navigate to `pass:c[http://localhost:9090]` to access the Prometheus console.
-. Monitor relevant metrics, such as `process_cpu_user_seconds_total`.
-
-== Logging with Amazon CloudWatch logs
-
-Logging within the {product} relies on the https://github.com/winstonjs/winston[winston library]. By default, logs at the debug level are not recorded. To activate debug logs, you must set the environment variable `LOG_LEVEL` to debug in your {product} instance.
-
-=== Configuring the application log level
-
-You can configure the application log level in both Helm deployment and Operator-backed deployment.
-
-Helm deployment::
-+
---
-To update the logging level, add the environment variable `LOG_LEVEL` to your Helm chart's `values.yaml` file:
-
-[source,yaml]
-----
-upstream:
- backstage:
- # --- Truncated ---
- extraEnvVars:
- - name: LOG_LEVEL
- value: debug
-----
---
-
-Operator-backed deployment::
-+
---
-You can modify the logging level by including the environment variable `LOG_LEVEL` in your custom resource as follows:
-
-[source,yaml]
-----
-spec:
- # Other fields omitted
- application:
- extraEnvs:
- envs:
- - name: LOG_LEVEL
- value: debug
-----
---
-
-=== Retrieving logs from Amazon CloudWatch
-
-The CloudWatch Container Insights are used to capture logs and metrics for Amazon EKS. For more information, see https://docs.aws.amazon.com/prescriptive-guidance/latest/implementing-logging-monitoring-cloudwatch/kubernetes-eks-logging.html[Logging for Amazon EKS] documentation.
-
-To capture the logs and metrics, https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Container-Insights-setup-EKS-addon.html[install the Amazon CloudWatch Observability EKS add-on] in your cluster. Following the setup of Container Insights, you can access container logs using Logs Insights or Live Tail views.
-
-CloudWatch names the log group where all container logs are consolidated in the following manner:
-
-`/aws/containerinsights//application`
-
-Following is an example query to retrieve logs from the Developer Hub instance:
-
-[source,sql]
-----
-fields @timestamp, @message, kubernetes.container_name
-| filter kubernetes.container_name in ["install-dynamic-plugins", "backstage-backend"]
-----
diff --git a/modules/observe/ref-audit-log-catalog-events.adoc b/modules/observe/ref-audit-log-catalog-events.adoc
deleted file mode 100644
index b47a7847ce..0000000000
--- a/modules/observe/ref-audit-log-catalog-events.adoc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Module included in the following assemblies:
-// assembly-audit-log.adoc
-
-:_mod-docs-content-type: REFERENCE
-[id="ref-audit-log-catalog-events.adoc_{context}"]
-= Catalog events
-
-{product-short} audit logs can include the following catalog events:
-
-`CatalogEntityAncestryFetch`:: Tracks `GET` requests to the `/entities/by-name/:kind/:namespace/:name/ancestry` endpoint, which returns the ancestry of an entity
-`CatalogEntityBatchFetch`:: Tracks `POST` requests to the `/entities/by-refs` endpoint, which returns a batch of entities
-`CatalogEntityDeletion`:: Tracks `DELETE` requests to the `/entities/by-uid/:uid` endpoint, which deletes an entity
-
-[NOTE]
-====
-If the parent location of the deleted entity is still present in the catalog, then the entity is restored in the catalog during the next processing cycle.
-====
-
-`CatalogEntityFacetFetch`:: Tracks `GET` requests to the `/entity-facets` endpoint, which returns the facets of an entity
-`CatalogEntityFetch`:: Tracks `GET` requests to the `/entities` endpoint, which returns a list of entities
-`CatalogEntityFetchByName`:: Tracks `GET` requests to the `/entities/by-name/:kind/:namespace/:name` endpoint, which returns an entity matching the specified entity reference, for example, `:/`
-`CatalogEntityFetchByUid`:: Tracks `GET` requests to the `/entities/by-uid/:uid` endpoint, which returns an entity matching the unique ID of the specified entity
-`CatalogEntityRefresh`:: Tracks `POST` requests to the `/entities/refresh` endpoint, which schedules the specified entity to be refreshed
-`CatalogEntityValidate`:: Tracks `POST` requests to the `/entities/validate` endpoint, which validates the specified entity
-`CatalogLocationCreation`:: Tracks `POST` requests to the `/locations` endpoint, which creates a location
-
-[NOTE]
-====
-A location is a marker that references other places to look for catalog data.
-====
-
-`CatalogLocationAnalyze`:: Tracks `POST` requests to the `/locations/analyze` endpoint, which analyzes the specified location
-`CatalogLocationDeletion`:: Tracks `DELETE` requests to the `/locations/:id` endpoint, which deletes a location and all child entities associated with it
-`CatalogLocationFetch`:: Tracks `GET` requests to the `/locations` endpoint, which returns a list of locations
-`CatalogLocationFetchByEntityRef`:: Tracks `GET` requests to the `/locations/by-entity` endpoint, which returns a list of locations associated with the specified entity reference
-`CatalogLocationFetchById`:: Tracks `GET` requests to the `/locations/:id` endpoint, which returns a location matching the specified location ID
-`QueriedCatalogEntityFetch`:: Tracks `GET` requests to the `/entities/by-query` endpoint, which returns a list of entities matching the specified query
diff --git a/modules/observe/ref-audit-log-fields.adoc b/modules/observe/ref-audit-log-fields.adoc
deleted file mode 100644
index 0e6de692e1..0000000000
--- a/modules/observe/ref-audit-log-fields.adoc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Module included in the following assemblies:
-// assembly-audit-log.adoc
-
-:_mod-docs-content-type: REFERENCE
-[id="ref-audit-log-fields.adoc_{context}"]
-= Audit log fields
-
-{product-short} audit logs can include the following fields:
-
-`eventName`:: The name of the audited event.
-`actor`:: An object containing information about the actor that triggered the audited event. Contains the following fields:
-`actorId`::: The name/id/`entityRef` of the associated user or service. Can be `null` if an unauthenticated user accesses the endpoints and the default authentication policy is disabled.
-`ip`::: The IP address of the actor (optional).
-`hostname`::: The hostname of the actor (optional).
-`client`::: The user agent of the actor (optional).
-`stage`:: The stage of the event at the time that the audit log was generated, for example, `initiation` or `completion`.
-`status`:: The status of the event, for example, `succeeded` or `failed`.
-`meta`:: An optional object containing event specific data, for example, `taskId`.
-`request`:: An optional field that contains information about the HTTP request sent to an endpoint. Contains the following fields:
-`method`::: The HTTP method of the request.
-`query`::: The `query` fields of the request.
-`params`::: The `params` fields of the request.
-`body`::: The request `body`. The `secrets` provided when creating a task are redacted and appear as `***`.
-`url`::: The endpoint URL of the request.
-`response`:: An optional field that contains information about the HTTP response sent from an endpoint. Contains the following fields:
-`status`::: The status code of the HTTP response.
-`body`::: The contents of the request body.
-`isAuditLog`:: A flag set to `true` to differentiate audit logs from other log types.
-`errors`:: A list of errors containing the `name`, `message` and potentially the `stack` field of the error. Only appears when `status` is `failed`.
diff --git a/modules/observe/ref-audit-log-scaffolder-events.adoc b/modules/observe/ref-audit-log-scaffolder-events.adoc
deleted file mode 100644
index 19422794b9..0000000000
--- a/modules/observe/ref-audit-log-scaffolder-events.adoc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Module included in the following assemblies:
-// assembly-audit-log.adoc
-
-:_mod-docs-content-type: REFERENCE
-[id="ref-audit-log-scaffolder-events.adoc_{context}"]
-= Scaffolder events
-
-{product-short} audit logs can include the following scaffolder events:
-
-`ScaffolderParameterSchemaFetch`:: Tracks `GET` requests to the `/v2/templates/:namespace/:kind/:name/parameter-schema` endpoint which return template parameter schemas
-`ScaffolderInstalledActionsFetch`:: Tracks `GET` requests to the `/v2/actions` endpoint which grabs the list of installed actions
-`ScaffolderTaskCreation`:: Tracks `POST` requests to the `/v2/tasks` endpoint which creates tasks that the scaffolder executes
-`ScaffolderTaskListFetch`:: Tracks `GET` requests to the `/v2/tasks` endpoint which fetches details of all tasks in the scaffolder.
-`ScaffolderTaskFetch`:: Tracks `GET` requests to the `/v2/tasks/:taskId` endpoint which fetches details of a specified task `:taskId`
-`ScaffolderTaskCancellation`:: Tracks `POST` requests to the `/v2/tasks/:taskId/cancel` endpoint which cancels a running task
-`ScaffolderTaskStream`:: Tracks `GET` requests to the `/v2/tasks/:taskId/eventstream` endpoint which returns an event stream of the task logs of task `:taskId`
-`ScaffolderTaskEventFetch`:: Tracks `GET` requests to the `/v2/tasks/:taskId/events` endpoint which returns a snapshot of the task logs of task `:taskId`
-`ScaffolderTaskDryRun`:: Tracks `POST` requests to the `/v2/dry-run` endpoint which creates a dry-run task. All audit logs for events associated with dry runs have the `meta.isDryLog` flag set to `true`.
-`ScaffolderStaleTaskCancellation`:: Tracks automated cancellation of stale tasks
-`ScaffolderTaskExecution`:: Tracks the `initiation` and `completion` of a real scaffolder task execution (will not occur during dry runs)
-`ScaffolderTaskStepExecution`:: Tracks `initiation` and `completion` of a scaffolder task step execution
-`ScaffolderTaskStepSkip`:: Tracks steps skipped due to `if` conditionals not being met
-`ScaffolderTaskStepIteration`:: Tracks the step execution of each iteration of a task step that contains the `each` field.
diff --git a/modules/observe/ref-customizing-telemetry-segment.adoc b/modules/observe/ref-customizing-telemetry-segment.adoc
deleted file mode 100644
index 9ee7698713..0000000000
--- a/modules/observe/ref-customizing-telemetry-segment.adoc
+++ /dev/null
@@ -1,12 +0,0 @@
-[id="customizing-telemetry-segment_{context}"]
-= Customizing telemetry Segment source
-
-
-The `analytics-provider-segment` plugin sends the collected telemetry data to {company-name} by default. However, you can configure a new Segment source that receives telemetry data based on your needs. For configuration, you need a unique Segment write key that points to the Segment source.
-
-[NOTE]
-====
-By configuring a new Segment source, you can collect and analyze the same set of data that is mentioned in link:{telemetry-data-collection-book-url}[{telemetry-data-collection-book-title}]. You might also require to create your own telemetry data collection notice for your application users.
-====
-
-
diff --git a/modules/observe/ref-disabling-telemetry.adoc b/modules/observe/ref-disabling-telemetry.adoc
deleted file mode 100644
index 66bf4470e1..0000000000
--- a/modules/observe/ref-disabling-telemetry.adoc
+++ /dev/null
@@ -1,6 +0,0 @@
-[id="disabling-telemetry-data-collection_{context}"]
-= Disabling telemetry data collection in {product-very-short}
-
-To disable telemetry data collection, you must disable the `analytics-provider-segment` plugin either using the Helm Chart or the {product} Operator configuration.
-
-
diff --git a/modules/release-notes/list-fixed-security-issues-in-product-1.5.0.txt b/modules/release-notes/list-fixed-security-issues-in-product-1.5.0.txt
deleted file mode 100644
index cb14d561b9..0000000000
--- a/modules/release-notes/list-fixed-security-issues-in-product-1.5.0.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-CVE-2024-56326
-CVE-2024-56201
-CVE-2024-45338
-CVE-2024-52798
-CVE-2024-56334
-CVE-2024-55565
-CVE-2025-22150
-CVE-2023-26136
diff --git a/modules/release-notes/list-fixed-security-issues-in-rpm-1.5.0.txt b/modules/release-notes/list-fixed-security-issues-in-rpm-1.5.0.txt
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/modules/release-notes/ref-release-notes-breaking-changes.adoc b/modules/release-notes/ref-release-notes-breaking-changes.adoc
index f2310b4f6c..e6cb9aa828 100644
--- a/modules/release-notes/ref-release-notes-breaking-changes.adoc
+++ b/modules/release-notes/ref-release-notes-breaking-changes.adoc
@@ -4,6 +4,88 @@
This section lists breaking changes in {product} {product-version}.
+[id="removed-functionality-rhidp-6215"]
+== The Topology-specific permission `topology.view.read` is removed
+
+Previously, the Topology plugin used `topology.view.read` permission to control access. Users were unable to configure Topology permissions using the RBAC UI. With this update, users can configure Kubernetes plugin permissions using the RBAC UI, which now governs the access to the Topology plugin. You can now use Kubernetes plugin permissions `kubernetes.clusters.read`, `kubernetes.resources.read` and `kubernetes.proxy` for the Topology plugin, as the Topology-specific permission `topology.view.read` is removed.
+
+If you are using a CSV permission file, update the following lines:
+
+.Old Topology permission definition
+[source,csv]
+----
+p, role:default/topology-viewer, topology.view.read, read, allow
+p, role:default/topology-viewer, kubernetes.proxy, use, allow
+----
+
+.New Topology permission definition
+[source,csv]
+----
+p, role:default/topology-viewer, kubernetes.clusters.read, read, allow
+p, role:default/topology-viewer, kubernetes.resources.read, read, allow
+p, role:default/topology-viewer, kubernetes.proxy, use, allow
+----
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-6215[RHIDP-6215]
+
+[id="removed-functionality-rhidp-7365"]
+== Migration to the core Auditor service
+
+The Auditor format, including audit fields, event names, and event IDs, has been updated to align with the new Auditor service conventions defined by the upstream Backstage Auditor service. Filtering queries based on the old format might fail to work as expected.
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-7365[RHIDP-7365]
+
+[id="removed-functionality-rhidp-7373"]
+== {product} introduces the Backstage Audit Log Service
+
+{product} {product-version} introduces the Backstage Audit Log Service, which replaces the custom audit logging system. This is a significant structural and behavioral change to how audit events are generated and consumed.
+
+The key changes introduced by this transition include the following:
+
+* Audit logging is now delegated to Backstage plugins. Each plugin in Backstage is responsible for implementing and emitting its own audit events.
+
+* Audit event names, structure, and content may differ per plugin. Audit events are scoped and designed independently within each plugin using the standardized upstream mechanism, which automatically captures actor details and plugin context.
+
+* New Event Structure and Naming: Audit event names now follow Backstage’s conventions (for example, lowercase, kebab-case names), and include structured metadata such as `actionType`. Legacy {product-short} event names (for example, `ScaffolderTaskCreation`, `CatalogEntityDeletion`) are no longer used.
+
+* Enhanced Log Context: Each audit event includes the plugin context, making it easier to filter logs for specific functional areas. You can filter by the event IDs or metadata associated with that plugin.
+
+
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-7373[RHIDP-7373]
+
+[id="removed-functionality-rhidp-7433"]
+== The Tekton-specific permission `tekton.view.read` is removed
+
+Previously, the Tekton plugin used `tekton.view.read` permission to control access. Users were unable to configure Tekton permissions using the RBAC UI. With this update, users can configure Kubernetes plugin permissions using the RBAC UI, which now governs the access to the Tekton plugin. You can now use Kubernetes plugin permissions `kubernetes.clusters.read`, `kubernetes.resources.read` and `kubernetes.proxy` for the Tekton plugin, as the Tekton-specific permission `tekton.view.read` is removed.
+
+If you are using a CSV permission file, update the following lines:
+
+.Old Tekton permission definition
+[source,csv]
+----
+p, role:default/tekton-viewer, tekton.view.read, read, allow
+p, role:default/tekton-viewer, kubernetes.proxy, use, allow
+----
+
+.New Tekton permission definition
+[source,csv]
+----
+p, role:default/tekton-viewer, kubernetes.clusters.read, read, allow
+p, role:default/tekton-viewer, kubernetes.resources.read, read, allow
+p, role:default/tekton-viewer, kubernetes.proxy, use, allow
+----
+
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-7433[RHIDP-7433]
+
-None.
diff --git a/modules/release-notes/ref-release-notes-deprecated-functionalities.adoc b/modules/release-notes/ref-release-notes-deprecated-functionalities.adoc
index 56892e69a7..78d67a1ecf 100644
--- a/modules/release-notes/ref-release-notes-deprecated-functionalities.adoc
+++ b/modules/release-notes/ref-release-notes-deprecated-functionalities.adoc
@@ -4,6 +4,16 @@
This section lists deprecated functionalities in {product} {product-version}.
+[id="deprecated-functionality-rhidp-6368"]
+== Deprecation of dynamic imports with `import(...)`
+
+The use of dynamic imports with `import(...)` has been deprecated and is no longer supported. The Backstage CLI supports native ESM in Node.js code, which gives you access to ESM-only packages. Therefore, you must now use `require(...) as typeof import(...)` when working with ESM or CommonJS packages.
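+
+As an illustrative sketch, assuming a hypothetical ESM-only package named `esm-only-pkg`, the replacement pattern looks like the following:
+
+[source,typescript]
+----
+// Previously: const esmOnlyPkg = await import('esm-only-pkg');
+// Now, in CommonJS code, keep the type information while using require:
+const esmOnlyPkg = require('esm-only-pkg') as typeof import('esm-only-pkg');
+----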
+
+
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-6368[RHIDP-6368]
+
-None.
diff --git a/modules/release-notes/ref-release-notes-developer-preview.adoc b/modules/release-notes/ref-release-notes-developer-preview.adoc
new file mode 100644
index 0000000000..0f6b03d8e5
--- /dev/null
+++ b/modules/release-notes/ref-release-notes-developer-preview.adoc
@@ -0,0 +1,29 @@
+:_content-type: REFERENCE
+[id="developer-preview"]
+= Developer Preview
+
+This section lists Developer Preview features in {product} {product-version}.
+
+[IMPORTANT]
+====
+Developer Preview features are not supported by Red Hat in any way and are not functionally complete or production-ready. Do not use Developer Preview features for production or business-critical workloads. Developer Preview features provide early access to functionality in advance of possible inclusion in a Red Hat product offering. Customers can use these features to test functionality and provide feedback during the development process. Developer Preview features might not have any documentation, are subject to change or removal at any time, and have received limited testing. Red Hat might provide ways to submit feedback on Developer Preview features without an associated SLA.
+
+For more information about the support scope of Red Hat Developer Preview features, see link:https://access.redhat.com/support/offerings/devpreview/[Developer Preview Support Scope].
+====
+
+[id="developer-preview-rhidp-6575"]
+== {product} Local
+
+{product-local} ({product-local-very-short}) is now available as a Developer Preview feature, providing a lightweight, self-contained version of {product-very-short} that allows developers and platform engineers to work on templates, try out plugins, validate software catalogs, and do other tasks without having to install {product-short} on a Kubernetes cluster.
+
+For more information about installing {product-local-very-short}, see link:https://github.com/redhat-developer/rhdh-local[{product-local} on GitHub].
+
+.Additional resources
+* For more information, see the blog post link:https://developers.redhat.com/blog/2025/03/31/run-red-hat-developer-hub-locally-ease[Run Red Hat Developer Hub Locally with Ease].
+* link:https://issues.redhat.com/browse/RHIDP-6575[RHIDP-6575]
+
+
+
diff --git a/modules/release-notes/ref-release-notes-fixed-issues.adoc b/modules/release-notes/ref-release-notes-fixed-issues.adoc
index dad582fc5a..65245ab098 100644
--- a/modules/release-notes/ref-release-notes-fixed-issues.adoc
+++ b/modules/release-notes/ref-release-notes-fixed-issues.adoc
@@ -4,6 +4,101 @@
This section lists issues fixed in {product} {product-version}.
+== Fixed issues in 1.6.0
+
+[id="bug-fix-rhidp-5731"]
+=== Updated the air-gapped installation guide for non-OpenShift platforms
+
+Previously, {product} documentation did not describe how to install {product-short} by using the Helm chart in fully or partially air-gapped environments on supported Kubernetes platforms.
+
+With this update, the documentation provides instructions for mirroring required container images, updating Helm values, and installing the chart, without relying on internet access.
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-5731[RHIDP-5731]
+
+
+[id="bug-fix-rhidp-6010"]
+=== Line wrapping enabled for long menu heading labels
+
+Previously, menu items with long text, such as _Platform Engineer Services_, were cut off in the sidebar menu. With this update, line wrapping is enabled for long menu heading labels, preventing truncation and ensuring that the full text is visible.
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-6010[RHIDP-6010]
+
+
+[id="bug-fix-rhidp-6015"]
+=== Dynamic favicon configuration in `app-config.yaml` is not displayed on the login page
+
+Before this update, the app-config configuration `app.branding.iconLogo` was not applied as the favicon in the browser.
+
+This issue has been fixed, and the app-config configuration `app.branding.iconLogo` now correctly sets the favicon in the browser.
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-6015[RHIDP-6015]
+
+
+[id="bug-fix-rhidp-6042"]
+=== Floating Action Button (FAB) positioned in the 'Bottom-Left' slot on {product-short}
+
+Previously, the Floating Action Button (FAB) appeared over the navigation sidebar when the slot was set to 'bottom-left'. This placement obstructed access to navigation elements, potentially hindering user interaction.
+
+With this update, the FAB's position is adjusted to render adjacent to the navigation for the 'bottom-left' slot position. As a result, users can access navigation options without obstruction.
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-6042[RHIDP-6042]
+
+
+[id="bug-fix-rhidp-6448"]
+=== Manually added resolutions override resolutions added by `--suppress-native-package`
+
+Previously, the `export-dynamic-plugin` command did not overwrite manually added resolutions, which could result in incorrect package dependencies in the exported dynamic plugin.
+
+With this update, the `package export-dynamic-plugin` command overwrites manually added resolutions, ensuring that Backstage dependencies are hoisted and native dependencies are suppressed in the exported dynamic plugin.
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-6448[RHIDP-6448]
+
+
+[id="bug-fix-rhidp-6597"]
+=== Fixed unreachable links in the Operator installation information page on the OpenShift console
+
+Previously, the {product-short} Operator details page in the OpenShift web console contained links to some pages that could not be reached outside of the {company-name} network.
+With this update, the links in the Operator installation information page have been fixed.
+
+
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-6597[RHIDP-6597]
+
+
+[id="bug-fix-rhidp-7380"]
+=== Fixed Profile dropdown showing `Guest` instead of user's name with OIDC authentication
+
+Previously, the Profile dropdown in the Global Header showed `Guest` instead of the logged-in user’s display name when logging in using the OIDC authentication.
+
+With this update, the Profile dropdown now correctly displays the user's name by first checking `spec.profile.displayName`, then `metadata.title`, and finally falls back to the name shown on the Profile card in the Settings page if neither is available.
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-7380[RHIDP-7380]
+
+
+[id="bug-fix-rhidp-7529"]
+=== Fixed incorrect branding in 1.6.0 Helm chart release
+
+Previously, the upstream branding was used instead of {company-name} branding when releasing the 1.6.0 Helm chart.
+
+This issue is fixed in the 1.6.1 Helm chart release.
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-7529[RHIDP-7529]
+
-None.
diff --git a/modules/release-notes/ref-release-notes-fixed-security-issues.adoc b/modules/release-notes/ref-release-notes-fixed-security-issues.adoc
new file mode 100644
index 0000000000..ec92b44cb9
--- /dev/null
+++ b/modules/release-notes/ref-release-notes-fixed-security-issues.adoc
@@ -0,0 +1,9 @@
+:_content-type: REFERENCE
+[id="fixed-security-issues"]
+= Fixed security issues
+
+You can view the security issues fixed in {product} {product-version} at link:https://access.redhat.com/security/security-updates/cve?q=red+hat+developer+hub&p=1&sort=cve_publicDate+desc,allTitle+desc&rows=10&documentKind=Cve[Red Hat Security Updates].
+
+// For 1.6.1, see link:https://access.redhat.com/errata/RHSA-2025:????[Red Hat Security Advisory RHSA-2025:????].
+
+For 1.6.0, see link:https://access.redhat.com/errata/RHSA-2025:7626[Red Hat Security Advisory RHSA-2025:7626].
diff --git a/modules/release-notes/ref-release-notes-known-issues.adoc b/modules/release-notes/ref-release-notes-known-issues.adoc
index 81fb34582e..1db9fb17ed 100644
--- a/modules/release-notes/ref-release-notes-known-issues.adoc
+++ b/modules/release-notes/ref-release-notes-known-issues.adoc
@@ -4,6 +4,14 @@
This section lists known issues in {product} {product-version}.
+[id="known-issue-rhidp-7435"]
+== ArgoCD plugin permission and Quay plugin permission are not displayed in the RBAC front-end UI
+
+Permissions associated only with front-end plugins do not appear in the UI because they require a backend plugin to expose the permission framework's well-known endpoint. As a workaround, you can apply these permissions by using a CSV file or directly calling the REST API of the RBAC backend plugin. Affected plugins include ArgoCD (`argocd.view.read`) and Quay (`quay.view.read`).
+
+
+.Additional resources
+* link:https://issues.redhat.com/browse/RHIDP-7435[RHIDP-7435]
+
-None.
diff --git a/modules/release-notes/ref-release-notes-new-features.adoc b/modules/release-notes/ref-release-notes-new-features.adoc
index a9fe742256..2c1a0fec99 100644
--- a/modules/release-notes/ref-release-notes-new-features.adoc
+++ b/modules/release-notes/ref-release-notes-new-features.adoc
@@ -4,5 +4,168 @@
This section highlights new features in {product} {product-version}.
-None.
+[id="feature-rhidp-3597"]
+== OpenTelemetry metrics support added to the Keycloak backend plugin
+
+With this update, the Keycloak backend plugin supports OpenTelemetry metrics, which you can use to monitor fetch operations and diagnose potential issues.
+
+The available counters include the following:
+
+* `backend_keycloak_fetch_task_failure_count_total`: Counts fetch task failures where no data was returned due to an error.
+
+* `backend_keycloak_fetch_data_batch_failure_count_total`: Counts partial data batch failures. Even if some batches fail, the plugin continues fetching others.
+
+These counters include the `taskInstanceId` label, which uniquely identifies each scheduled fetch task and allows you to trace failures back to individual task executions.
+
+.Example metric
+[source,text]
+----
+backend_keycloak_fetch_data_batch_failure_count_total{taskInstanceId="df040f82-2e80-44bd-83b0-06a984ca05ba"} 1
+----
+
+You can export metrics by using any OpenTelemetry-compatible backend, such as Prometheus.
+
+
+[id="enhancement-rhidp-5039"]
+== Enhanced session duration control and refresh token cookie policy
+
+With this update, a new configurable field, `sessionDuration`, has been introduced in the supported authentication providers. Administrators can specify custom user session durations, which provides better control over session timeouts and enforced logouts. Additionally, the default maximum age of the refresh token cookie has been reduced to 400 days to align with modern web browser policies.
+
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.5/html-single/authentication_in_red_hat_developer_hub/index#idm140459408106672[Authentication in {product}].
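+
+The following app-config sketch is for illustration only; it assumes an OIDC provider environment named `production`, placeholder environment variables, and an example duration of 24 hours:
+
+[source,yaml]
+----
+auth:
+  providers:
+    oidc:
+      production:
+        metadataUrl: ${AUTH_OIDC_METADATA_URL}
+        clientId: ${AUTH_OIDC_CLIENT_ID}
+        clientSecret: ${AUTH_OIDC_CLIENT_SECRET}
+        # Illustrative value; adjust the duration to your session policy
+        sessionDuration: { hours: 24 }
+----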
+
+[id="enhancement-rhidp-5211"]
+== Support for custom version information on the settings page
+
+{product} now supports extending or replacing the version information displayed on the settings page. Customers and partners can use this feature to display their own version information instead.
+
+
+
+[id="enhancement-rhidp-5987"]
+== Updated Auditor Service
+
+{product} {product-version} introduces an enhancement to the RBAC and Bulk Import plugins, enabling users to use the new Backstage Auditor service. The key changes include the following:
+
+* Audit log format update: The audit log format has been updated to align with the new Auditor service conventions. Audit fields and event identifiers have been updated, so filtering queries based on the old format might no longer work as expected.
+
+* Backend plugin API integration: The audit log is now backed by the `@backstage/backend-plugin-api` package.
+
+* Audit event grouping: The Bulk Import backend plugin and RBAC backend plugin emit audit events for various operations, with the events grouped logically by `eventId`.
+
+
+
+
+[id="feature-rhidp-6158"]
+== Renamed `Create` to `Self-service`
+
+The term `Create` has been renamed to `Self-service` across key UI areas to better align with the self-service functionality provided through the Backstage scaffolder, enhancing clarity for users.
+
+This change applies to the following areas:
+
+* Sidebar navigation
+* Global header
+* Catalog page
+* Scaffolder page
+
+[id="feature-rhidp-6170"]
+== Enhanced plugin visibility in the Extensions catalog
+
+With this update, the Extensions catalog now displays the default configuration of included plugins directly in {product}. This feature helps administrators better understand available plugins and their configuration options before enabling them. While plugin configurations are now visible, administrators still need to manually copy these configurations into their Helm Charts or Operator custom resource to install or configure a plugin.
+
+[id="enhancement-rhidp-6173"]
+== Simplified Operator-backed deployments on OpenShift with automatic `baseUrl` configuration
+
+Previously, deploying {product-short} using the Operator required manually configuring the `baseUrl` settings in the custom app-config ConfigMap.
+
+With this update, the Operator can automatically compute the default application URL based on the OpenShift cluster ingress domain and the custom Route settings in the `Backstage` Custom Resource. It then populates this value as the default `baseUrl` in the app-config ConfigMap that it generates for the {product-short} instance. This functionality is specific to OpenShift. The Operator fills the following fields in the default app-config ConfigMap: `app.baseUrl`, `backend.baseUrl`, and `backend.cors.origin`. As a result, you no longer need to set these values manually for most Operator-backed deployments on OpenShift, though you can still override them in your custom app-config ConfigMap.
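+
+For illustration only, the generated default app-config might resemble the following sketch; the host name pattern is an assumption based on the default Route naming and varies by cluster:
+
+[source,yaml]
+----
+app:
+  baseUrl: https://backstage-<cr-name>-<namespace>.<cluster-ingress-domain>
+backend:
+  baseUrl: https://backstage-<cr-name>-<namespace>.<cluster-ingress-domain>
+  cors:
+    origin: https://backstage-<cr-name>-<namespace>.<cluster-ingress-domain>
+----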
+
+[id="enhancement-rhidp-6184"]
+== New sidebar item visibility configuration
+
+{product} now supports a clean and flexible way to hide sidebar items by using a new `enabled` key in the sidebar menu configuration. If this key is set to `false`, the specified sidebar item no longer appears in the UI, while full backward compatibility with existing configurations is maintained.
+
+Example configuration:
+[source,yaml]
+----
+dynamicPlugins:
+ frontend:
+ default.main-menu-items:
+ menuItems:
+ default.home:
+ title: Home
+ icon: home
+ enabled: false
+ default.list:
+ title: References
+ icon: bookmarks
+ default.my-group:
+ parent: default.list
+ default.learning-path:
+ parent: default.list
+ title: ''
+ default.homepage:
+ title: HomePage 123
+ icon: home
+ enabled: false
+ default.create:
+ title: Create
+ icon: add
+ parent: default.homepage
+----
+
+You can now also toggle visibility of core sidebar elements like the logo, search, settings, and administration as shown:
+
+[source,yaml]
+----
+app:
+ sidebar:
+ search: false # hides sidebar search
+ logo: false # hides sidebar logo
+ settings: false # hides settings item
+ administration: false # hides administration item
+----
+
+[id="feature-rhidp-6253"]
+== {product-short} community plugins updated to Backstage 1.36
+
+The {product-short} community plugins have been updated to Backstage version 1.36.
+
+[id="feature-rhidp-6269"]
+== Added a new RBAC conditional rule `IS_OWNER` to the RBAC plugin
+
+{product} introduces a new RBAC conditional rule, `IS_OWNER`, that allows administrators to assign ownership to roles and control access to the RBAC plugin. This enhancement enables more granular access control by allowing ownership-based filtering of roles, permission policies, and conditional policies.
+
+This enhancement removes the resource type from the `policy.entity.create` permission, which prevents conditional rules from being applied to that permission. Update all permission policies that use the resource type `policy-entity` with the action `create` (for example, change `role:default/some_role, policy-entity, create, allow` to `role:default/some_role, policy.entity.create, create, allow`) to prevent degradation in future releases.
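+
+The following conditional policy sketch illustrates the new rule; the role name, permission mapping, and owner values are placeholders and might differ in your environment:
+
+[source,yaml]
+----
+result: CONDITIONAL
+roleEntityRef: role:default/example-admin
+pluginId: permission
+resourceType: policy-entity
+permissionMapping:
+  - read
+  - update
+  - delete
+conditions:
+  rule: IS_OWNER
+  resourceType: policy-entity
+  params:
+    owners:
+      - user:default/example-user
+----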
+
+
+[id="feature-rhidp-6555"]
+== Support for high availability in {aks-brand-name}
+
+{product} now supports high availability setups in {aks-brand-name} ({aks-short}). This enhancement allows the deployment to scale beyond a single replica, ensuring the application remains operational and accessible even in the event of failures or disruptions.
+
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.5/html-single/configuring_red_hat_developer_hub/index#HighAvailability[_Configuring high availability in Red Hat Developer Hub_].
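+
+As an illustrative sketch only, assuming a Helm chart installation where the replica count is exposed as `upstream.backstage.replicas` (verify the exact key for your chart version), scaling beyond a single replica might look like this:
+
+[source,yaml]
+----
+upstream:
+  backstage:
+    # Illustrative replica count; adjust to your availability requirements
+    replicas: 2
+----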
+
+[id="feature-rhidp-6764"]
+== Added `@backstage/plugin-scaffolder-backend-module-github` plugin for {product-short}
+
+{product} now supports the `@backstage/plugin-scaffolder-backend-module-github` plugin, enabling GitHub scaffolder actions within Software Templates. With this integration, you can securely create and manage repositories, open pull requests, trigger GitHub Actions workflows, and more, directly from a Software Template. This plugin enables you to automate GitHub interactions and workflows.
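+
+For example, a Software Template step sketch that uses the `publish:github` action provided by this module might look as follows; the repository URL and other input values are placeholders:
+
+[source,yaml]
+----
+steps:
+  - id: publish
+    name: Publish to GitHub
+    action: publish:github
+    input:
+      repoUrl: github.com?owner=<organization>&repo=<repository>
+      defaultBranch: main
+      description: Repository created from a Software Template
+----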
+
+[id="enhancement-rhidp-6882"]
+== Default OIDC sign-in resolver updated
+
+With this update, the default resolver for OIDC sign-in is set to `oidcSubClaimMatchingKeycloakUserId` to enhance security. This resolver is now also available as a configurable option under the sign-in resolver settings.
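+
+For illustration, an app-config sketch that sets this resolver explicitly might look like the following; the provider environment name is an assumption:
+
+[source,yaml]
+----
+auth:
+  providers:
+    oidc:
+      production:
+        signIn:
+          resolvers:
+            - resolver: oidcSubClaimMatchingKeycloakUserId
+----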
+
+[id="feature-rhidp-7424"]
+== New dynamic plugin for Kubernetes scaffolder actions
+
+With this update, {product-short} introduces the `@backstage-community/plugin-scaffolder-backend-module-kubernetes` plugin as a dynamic plugin, enabling Backstage template actions for Kubernetes. Currently, it includes the `create-namespace` action. This dynamic plugin is disabled by default.
+
+For more information, see link:https://docs.redhat.com/en/documentation/red_hat_developer_hub/{product-version}/html-single/configuring_dynamic_plugins/index#con-Kubernetes-custom-actions_title-plugins-rhdh-configure[Kubernetes custom actions in {product}].
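+
+To enable the plugin, a dynamic plugins configuration sketch might look like the following; the package path is an assumption and can differ in your distribution:
+
+[source,yaml]
+----
+plugins:
+  - package: ./dynamic-plugins/dist/backstage-community-plugin-scaffolder-backend-module-kubernetes-dynamic
+    # Disabled by default; set to false to enable the Kubernetes scaffolder actions
+    disabled: false
+----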
+
+
diff --git a/modules/release-notes/ref-release-notes-technology-preview.adoc b/modules/release-notes/ref-release-notes-technology-preview.adoc
deleted file mode 100644
index 350e30a75e..0000000000
--- a/modules/release-notes/ref-release-notes-technology-preview.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-:_content-type: REFERENCE
-[id="technology-preview"]
-= Technology Preview
-
-This section lists Technology Preview features in {product} {product-version}.
-
-[IMPORTANT]
-====
-Technology Preview features provide early access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process.
-However, these features are not fully supported under Red Hat Subscription Level Agreements, may not be functionally complete, and are not intended for production use.
-As Red Hat considers making future iterations of Technology Preview features generally available, we will attempt to resolve any issues that customers experience when using these features.
-See: link:https://access.redhat.com/support/offerings/techpreview/[Technology Preview support scope].
-====
-
-
-None.
-
diff --git a/modules/release-notes/single-source-fixed-security-issues.sh b/modules/release-notes/single-source-fixed-security-issues.sh
deleted file mode 100755
index 4fa0864f95..0000000000
--- a/modules/release-notes/single-source-fixed-security-issues.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2024 Red Hat, Inc.
-# This program, and the accompanying materials are made
-# available under the terms of the Apache Public License 2.0,
-# available at http://www.apache.org/licenses/
-#
-# SPDX-License-Identifier: Apache-2.0
-
-# Single-source the release notes Fixed security issues section from Red Hat Security Data API.
-# See: https://docs.redhat.com/en/documentation/red_hat_security_data_api/1.0/html/red_hat_security_data_api/cve
-
-# Fail and stop on first error
-set -e
-
-# get the z-stream version from the bundle-version attribute. Note that while chart-version could be larger, this is the correct value for CVE tracking
-product_version="$(grep ':product-bundle-version:' artifacts/attributes.adoc | cut -d' ' -f2 )"
-
-single_source_from_security_data () {
- sectionname="fixed-security-issues-in-${section}-${product_version}"
- dirname=$(dirname ${BASH_SOURCE})
- destination="${dirname}/snip-${sectionname}.adoc"
- list="${dirname}/list-${sectionname}.txt"
- # Assert that the list file exists.
- if [ ! -f ${list} ]
- then
- echo "ERROR: The ${list} file is missing. You must create it to proceed. For a given version, can collect the list of CVEs from a JIRA query like https://issues.redhat.com/issues/?jql=labels%3DSecurityTracking+and+project%3DRHIDP+and+fixversion%3D1.3.1 or list of Erratas from https://errata.devel.redhat.com/advisory/filters/4213"
- exit 1
- fi
- # Cleanup the destination files.
- rm -f "$destination"
- # Send output to the destination file.
- exec 3>&1 1>> "$destination"
- echo "= ${title}"
- for cve in $(cat ${list} | sort | uniq)
- do
- # Start the list.
- echo "link:https://access.redhat.com/security/cve/$cve[$cve]::"
- # Call the API to return a list of details.
- # Red Hat is last if there is one.
- # Red Hat details is single line.
- # MITRE details are multiline.
- # We keep Red Hat details if present.
- # We keep only the first two lines on MITRE details.
- curl -s "https://access.redhat.com/hydra/rest/securitydata/cve/$cve.json" | jq -r '.details[-1]' | head -n 2
- # Add a separation
- echo ""
- done
- # Stop sending output to the destination file
- exec 1>&3 3>&-
- echo "include::${destination}[leveloffset=+2]"
-}
-
-title="{product} dependency updates"
-section="product"
-single_source_from_security_data
-
-title="RHEL 9 platform RPM updates"
-section="rpm"
-single_source_from_security_data
-
-echo "INFO: Verify that the assemblies/assembly-release-notes-fixed-security-issues.adoc file contains aforementioned required include statements."
diff --git a/modules/release-notes/single-source-release-notes.jira2asciidoc.yml b/modules/release-notes/single-source-release-notes.jira2asciidoc.yml
index 170339d62b..222f03e137 100644
--- a/modules/release-notes/single-source-release-notes.jira2asciidoc.yml
+++ b/modules/release-notes/single-source-release-notes.jira2asciidoc.yml
@@ -61,7 +61,28 @@ sections:
AND "Release Note Status" = "Done"
AND level is EMPTY
AND status in (Closed, "Release Pending")
- AND "Release Note Type" in ("Developer Preview", "Technology Preview")
+ AND "Release Note Type" in ("Technology Preview")
+ AND fixVersion >= "{version_minor}"
+ AND fixVersion <= "{version_patch}"
+ ORDER BY key
+ template: with-jira-link
+ - id: developer-preview
+ title: Developer Preview
+ description: |
+ This section lists Developer Preview features in {product} {product-version}.
+
+ [IMPORTANT]
+ ====
+ Developer Preview features are not supported by Red Hat in any way and are not functionally complete or production-ready. Do not use Developer Preview features for production or business-critical workloads. Developer Preview features provide early access to functionality in advance of possible inclusion in a Red Hat product offering. Customers can use these features to test functionality and provide feedback during the development process. Developer Preview features might not have any documentation, are subject to change or removal at any time, and have received limited testing. Red Hat might provide ways to submit feedback on Developer Preview features without an associated SLA.
+
+ For more information about the support scope of Red Hat Developer Preview features, see link:https://access.redhat.com/support/offerings/devpreview/[Developer Preview Support Scope].
+ ====
+ query: >
+ project = "Red Hat Internal Developer Platform"
+ AND "Release Note Status" = "Done"
+ AND level is EMPTY
+ AND status in (Closed, "Release Pending")
+ AND "Release Note Type" in ("Developer Preview")
AND fixVersion >= "{version_minor}"
AND fixVersion <= "{version_patch}"
ORDER BY key
@@ -69,7 +90,7 @@ sections:
- id: fixed-issues
title: Fixed issues
description: |
- This section lists issues fixed in {product} {product-version} that have a significant impact on users.
+ This section lists issues fixed in {product} {product-version}.
query: >
project = "Red Hat Internal Developer Platform"
AND "Release Note Status" = "Done"
diff --git a/modules/release-notes/snip-fixed-security-issues-in-product-1.5.0.adoc b/modules/release-notes/snip-fixed-security-issues-in-product-1.5.0.adoc
deleted file mode 100644
index c1edd25487..0000000000
--- a/modules/release-notes/snip-fixed-security-issues-in-product-1.5.0.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-= {product} dependency updates
-link:https://access.redhat.com/security/cve/CVE-2023-26136[CVE-2023-26136]::
-A flaw was found in the tough-cookie package which allows Prototype Pollution due to improper handling of Cookies when using CookieJar in rejectPublicSuffixes=false mode. This issue arises from the manner in which the objects are initialized.
-
-link:https://access.redhat.com/security/cve/CVE-2024-45338[CVE-2024-45338]::
-A flaw was found in golang.org/x/net/html. This flaw allows an attacker to craft input to the parse functions that would be processed non-linearly with respect to its length, resulting in extremely slow parsing. This issue can cause a denial of service.
-
-link:https://access.redhat.com/security/cve/CVE-2024-52798[CVE-2024-52798]::
-A flaw was found in path-to-regexp. A path-to-regexp turns path strings into regular expressions. In certain cases, path-to-regexp will output a regular expression that can be exploited to cause poor performance.
-
-link:https://access.redhat.com/security/cve/CVE-2024-55565[CVE-2024-55565]::
-nanoid (aka Nano ID) before 5.0.9 mishandles non-integer values. 3.3.8 is also a fixed version.
-
-link:https://access.redhat.com/security/cve/CVE-2024-56201[CVE-2024-56201]::
-A flaw was found in the Jinja2 package. A bug in the Jinja compiler allows an attacker that controls both the content and filename of a template to execute arbitrary Python code, regardless of Jinja's sandbox being used. An attacker needs to be able to control both the filename and the contents of a template. Whether that is the case depends on the type of application using Jinja. This vulnerability impacts users of applications that execute untrusted templates where the template author can also choose the template filename.
-
-link:https://access.redhat.com/security/cve/CVE-2024-56326[CVE-2024-56326]::
-A flaw was found in the Jinja package. In affected versions of Jinja, an oversight in how the Jinja sandboxed environment detects calls to str.format allows an attacker that controls the content of a template to execute arbitrary Python code. To exploit the vulnerability, an attacker needs to control the content of a template. Whether that is the case depends on the type of application using Jinja. This vulnerability impacts users of applications that execute untrusted templates. Jinja's sandbox does catch calls to str.format and ensures they don't escape the sandbox. However, storing a reference to a malicious string's format method is possible, then passing that to a filter that calls it. No such filters are built into Jinja but could be present through custom filters in an application. After the fix, such indirect calls are also handled by the sandbox.
-
-link:https://access.redhat.com/security/cve/CVE-2024-56334[CVE-2024-56334]::
-A flaw was found in the systeminformation library for Node.js. In Windows systems, the SSID parameter of the `getWindowsIEEE8021x` function is not sanitized before it is passed to cmd.exe. This may allow a remote attacker to execute arbitrary commands on the target system.
-
-link:https://access.redhat.com/security/cve/CVE-2025-22150[CVE-2025-22150]::
-A flaw was found in the undici package for Node.js. Undici uses `Math.random()` to choose the boundary for a multipart/form-data request. It is known that the output of `Math.random()` can be predicted if several of its generated values are known. If an app has a mechanism that sends multipart requests to an attacker-controlled website, it can leak the necessary values. Therefore, an attacker can tamper with the requests going to the backend APIs if certain conditions are met.
-
diff --git a/modules/release-notes/snip-fixed-security-issues-in-rpm-1.5.0.adoc b/modules/release-notes/snip-fixed-security-issues-in-rpm-1.5.0.adoc
deleted file mode 100644
index e4930e95c6..0000000000
--- a/modules/release-notes/snip-fixed-security-issues-in-rpm-1.5.0.adoc
+++ /dev/null
@@ -1 +0,0 @@
-= RHEL 9 platform RPM updates
diff --git a/modules/software-catalogs/proc-adding-new-components-to-the-rhdh-instance.adoc b/modules/software-catalogs/proc-adding-new-components-to-the-rhdh-instance.adoc
new file mode 100644
index 0000000000..5cc6428f09
--- /dev/null
+++ b/modules/software-catalogs/proc-adding-new-components-to-the-rhdh-instance.adoc
@@ -0,0 +1,20 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-about-software-catalogs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-adding-new-components-to-the-rhdh-instance_{context}"]
+= Adding new components to your {product} instance
+
+.Prerequisites
+
+* You have installed and configured the {product} instance.
+* You have the required permissions. See link:{authorization-book-url}[{authorization-book-title}].
+
+.Procedure
+
+You can add components to your {product-very-short} instance using the following methods:
+
+* Register components manually by using the GUI or your `{my-app-config-file}` file, with the required permissions.
+* Create new components by using Software Templates.
+* Use the bulk import plugin with the required permissions. For more information, see link:{configuring-dynamic-plugins-book-url}#bulk-importing-github-repositories[Bulk importing GitHub repositories].
\ No newline at end of file
diff --git a/modules/software-catalogs/proc-creating-new-components-in-the-rhdh-instance.adoc b/modules/software-catalogs/proc-creating-new-components-in-the-rhdh-instance.adoc
new file mode 100644
index 0000000000..a0ad1f4a0e
--- /dev/null
+++ b/modules/software-catalogs/proc-creating-new-components-in-the-rhdh-instance.adoc
@@ -0,0 +1,19 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-about-software-catalogs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-creating-new-components-in-the-rhdh-instance_{context}"]
+= Creating new components in your {product} instance
+
+You can create new components in the Software Catalog in your {product-very-short} instance. {product} automatically registers all components that developers or platform engineers create using Templates in the Software Catalog.
+
+.Prerequisites
+
+* You have installed and configured the {product} instance.
+* You have the required permissions. See link:{authorization-book-url}[{authorization-book-title}].
+
+.Procedure
+
+. In your {product} navigation menu, click *Catalog*.
+. On the *Catalog* page, click *Self-service*.
\ No newline at end of file
diff --git a/modules/software-catalogs/proc-registering-components-manually-in-the-rhdh-instance.adoc b/modules/software-catalogs/proc-registering-components-manually-in-the-rhdh-instance.adoc
new file mode 100644
index 0000000000..ae6f964fdf
--- /dev/null
+++ b/modules/software-catalogs/proc-registering-components-manually-in-the-rhdh-instance.adoc
@@ -0,0 +1,46 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-about-software-catalogs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-registering-components-manually-in-the-rhdh-instance_{context}"]
+= Registering components manually in your {product-very-short} instance
+
+To manually register components in your {product-very-short} instance, create a `catalog-info.yaml` file and register it with your {product} instance. The `catalog-info.yaml` file contains the metadata that you want to register for your software component.
+
+.Prerequisites
+
+* You have installed and configured the {product} instance.
+* You have the required permissions. See link:{authorization-book-url}[{authorization-book-title}].
+
+.Procedure
+
+. In the root directory of your software project, create a file named `catalog-info.yaml`.
++
+.Example of a `catalog-info.yaml` file
+[source,yaml]
+----
+apiVersion: backstage.io/v1alpha1
+kind: Component
+metadata:
+ name: __
+ description: __
+ tags:
+ - example
+ - service
+ annotations:
+ github.com/project-slug: __
+spec:
+ type: __
+ owner: __
+ lifecycle: __
+----
+. Commit the `catalog-info.yaml` file to the root of your project source code repository.
+. In your {product} navigation menu, go to *Catalog* > *Self-service*.
+. On the *Self-service* page, click *Register Existing Component*.
+. On the *Register an existing component* page, enter the full URL of the `catalog-info.yaml` file in your repository. For example: link:https://github.com/backstage/backstage/blob/master/packages/catalog-model/examples/components/artist-lookup-component.yaml[Artist lookup component].
+. Complete the wizard instructions.
+
+.Verification
+
+* Your software component is listed in the Software Catalog. You can view its details and ensure all the metadata is accurate.
\ No newline at end of file
diff --git a/modules/software-catalogs/proc-searching-and-filter-software-catalogs.adoc b/modules/software-catalogs/proc-searching-and-filter-software-catalogs.adoc
new file mode 100644
index 0000000000..b30df81775
--- /dev/null
+++ b/modules/software-catalogs/proc-searching-and-filter-software-catalogs.adoc
@@ -0,0 +1,33 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-about-software-catalogs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-searching-and-filtering-software-catalogs_{context}"]
+= Searching and filtering Software Catalogs
+
+You can search and filter the Software Catalog by *Kind* or by using the *Filter* field.
+
+== Searching and filtering Software Catalogs by Kind
+
+To filter the Software Catalog by *Kind*, complete the following steps:
+
+.Procedure
+
+. In your {product} navigation menu, click *Catalog*.
+. On the *Catalog* page, click the *Kind* drop-down list.
+. Select the *Kind* that you want to filter by.
++
+[NOTE]
+====
+The available filter drop-down lists vary based on the *Kind* that you select, displaying options relevant to that specific entity type.
+====
+
+== Searching and filtering Software Catalogs by using the Filter field
+
+To filter the Software Catalog by using the *Filter* field, complete the following steps:
+
+.Procedure
+
+. In your {product} navigation menu, click *Catalog*.
+. In the *Search* box, enter the text you want to use to filter the components.
\ No newline at end of file
diff --git a/modules/software-catalogs/proc-starring-components-in-the-software-catalog.adoc b/modules/software-catalogs/proc-starring-components-in-the-software-catalog.adoc
new file mode 100644
index 0000000000..eb2e756173
--- /dev/null
+++ b/modules/software-catalogs/proc-starring-components-in-the-software-catalog.adoc
@@ -0,0 +1,21 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-about-software-catalogs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-starring-components-in-the-software-catalog_{context}"]
+= Starring components in the Software Catalog
+
+
+You can use the *Add to favorites* icon to add the software components that you visit regularly to the *Starred* category.
+
+.Procedure
+
+To quickly access the software components that you visit regularly, complete the following steps:
+
+. In your {product} navigation menu, click *Catalog*.
+. Find the software component that you want to add as a favorite, then click the *Add to favorites* icon under *Actions*.
+
+.Verification
+
+* The starred component is listed under *Your Starred Entities* on your *Home* page.
\ No newline at end of file
diff --git a/modules/software-catalogs/proc-updating-components-in-the-software-catalog.adoc b/modules/software-catalogs/proc-updating-components-in-the-software-catalog.adoc
new file mode 100644
index 0000000000..4840c1fd3a
--- /dev/null
+++ b/modules/software-catalogs/proc-updating-components-in-the-software-catalog.adoc
@@ -0,0 +1,35 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-about-software-catalogs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-updating-components-in-the-software-catalog_{context}"]
+= Updating components in the Software Catalog in your {product} instance
+
+You can update components in the Software Catalog in your {product} instance.
+
+.Prerequisites
+
+* You have installed and configured the {product} instance.
+* You have the required permissions. See link:{authorization-book-url}[{authorization-book-title}].
+
+.Procedure
+
+To update components in the Software Catalog in your {product} instance, complete the following steps:
+
+. In your {product} navigation menu, click *Catalog*.
+. Find the software component that you want to edit, then click the *Edit* icon under *Actions*.
++
+[NOTE]
+====
+This action redirects you to the YAML file in your remote repository.
+====
+
+. On your remote repository UI, update your YAML file.
++
+[NOTE]
+====
+After you merge your changes, the updated metadata in the Software Catalog appears after some time.
+====
\ No newline at end of file
diff --git a/modules/software-catalogs/proc-viewing-software-catalog-yaml.adoc b/modules/software-catalogs/proc-viewing-software-catalog-yaml.adoc
new file mode 100644
index 0000000000..f9b4fff8fe
--- /dev/null
+++ b/modules/software-catalogs/proc-viewing-software-catalog-yaml.adoc
@@ -0,0 +1,22 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-about-software-catalogs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-viewing-software-catalog-yaml_{context}"]
+= Viewing the Software Catalog YAML file in your {product} instance
+
+You can view the Software Catalog YAML file in your {product} instance. The YAML file displays the metadata for the components in your Software Catalog.
+
+.Procedure
+
+To view the Software Catalog YAML file in your {product} instance, complete the following steps:
+
+. In your {product} navigation menu, click *Catalog*.
+. Find the software component that you want to view, then click the *View* icon under *Actions*.
++
+[NOTE]
+====
+These steps redirect you to the YAML file on your remote repository.
+====
\ No newline at end of file
diff --git a/modules/techdocs/proc-techdocs-add-docs-from-remote-repo.adoc b/modules/techdocs/proc-techdocs-add-docs-from-remote-repo.adoc
new file mode 100644
index 0000000000..fd38baf500
--- /dev/null
+++ b/modules/techdocs/proc-techdocs-add-docs-from-remote-repo.adoc
@@ -0,0 +1,30 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-techdocs-add-docs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-techdocs-add-docs-from-remote-repo_{context}"]
+= Importing documentation into TechDocs from a remote repository
+
+Teams can store their documentation files in the same remote repository where they store their code files. You can import documentation into your TechDocs plugin from a remote repository that contains the documentation files that your team uses.
+
+.Prerequisites
+
+* Your organization has documentation files stored in a remote repository.
+* You have a `mkdocs.yaml` file in the root directory of your repository (see the example sketch after this list).
+* You have the `catalog.entity.create` and `catalog.location.create` permissions to import documentation into TechDocs from a remote repository.
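+
+A minimal `mkdocs.yaml` sketch for TechDocs might look like the following; the site name and navigation entries are placeholders:
+
+[source,yaml]
+----
+site_name: 'example-component'
+nav:
+  - Home: index.md
+plugins:
+  - techdocs-core
+----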
+
+.Procedure
+
+. In your {product} instance, click *Catalog > Self-service > Register Existing Component*.
+. In the *Select URL* box, enter the URL to the `catalog-info.yaml` file that you want to import from your repository using the following format:
++
+`https://github.com/__/__/blob/__/__/catalog-info.yaml`
+. Click *Analyze*.
+. Click *Finish*.
+
+.Verification
+
+. In the {product} navigation menu, click *Docs*.
+. Verify that the documentation that you imported is listed in the table on the *Documentation* page.
diff --git a/modules/techdocs/proc-techdocs-edit-docs.adoc b/modules/techdocs/proc-techdocs-edit-docs.adoc
new file mode 100644
index 0000000000..e93a38f03c
--- /dev/null
+++ b/modules/techdocs/proc-techdocs-edit-docs.adoc
@@ -0,0 +1,17 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-using-techdocs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-techdocs-edit-docs_{context}"]
+= Editing documentation in TechDocs
+
+You can edit a document in your TechDocs plugin directly from the document book page. Any authorized user in your organization can edit a document regardless of whether or not they are the owner of the document.
+
+.Procedure
+
+. In the {product} navigation menu, click *Docs*.
+. In the *Documentation* table, click the name of the document that you want to edit.
+. In the document, click the *Edit this page* icon to open the document in your remote repository.
+. In your remote repository, edit the document as needed.
+. Use the repository provider UI and your usual team processes to commit and merge your changes.
diff --git a/modules/techdocs/proc-techdocs-find-docs.adoc b/modules/techdocs/proc-techdocs-find-docs.adoc
new file mode 100644
index 0000000000..56252f8c73
--- /dev/null
+++ b/modules/techdocs/proc-techdocs-find-docs.adoc
@@ -0,0 +1,28 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-using-techdocs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-techdocs-find-docs_{context}"]
+= Finding documentation in TechDocs
+
+By default, the TechDocs plugin *Documentation* page shows all of the documentation that your organization has imported into your {product} instance. You can use any combination of the following methods to find the documentation that you want to view:
+
+* Enter a keyword in the search bar to see all documents that contain the keyword anywhere in the document.
+* Filter by *Owner* to see only documents that are owned by a particular user or group in your organization.
+* Filter by *Tags* to see only documents that contain a particular tag.
+* Filter by *Owned* to see only documents that are owned by you or by a group that you belong to.
+* Filter by *Starred* to see only documents that you have added to favorites.
+
+By default, the *All* field shows the total number of documents that have been imported into TechDocs. If you search or use filters, the *All* field shows the number of documents that meet the search and filter criteria that you applied.
+
+.Prerequisites
+
+* The TechDocs plugin is enabled and configured.
+* Documentation is imported into TechDocs.
+* You have the required roles and permissions to add documentation to TechDocs and to view it.
+
+.Procedure
+
+. In the {product} navigation menu, click *Docs*.
+. On the *Documentation* page, use the search bar, filters, or both to locate the document that you want to view.
diff --git a/modules/techdocs/proc-techdocs-view-docs.adoc b/modules/techdocs/proc-techdocs-view-docs.adoc
new file mode 100644
index 0000000000..6ec272a148
--- /dev/null
+++ b/modules/techdocs/proc-techdocs-view-docs.adoc
@@ -0,0 +1,41 @@
+// Module included in the following assemblies:
+//
+// * assemblies/assembly-using-techdocs.adoc
+
+:_mod-docs-content-type: PROCEDURE
+[id="proc-techdocs-view-docs_{context}"]
+= Viewing documentation in TechDocs
+
+In TechDocs, a document might be part of a book that contains other documents that are related to the same topic.
+
+Clicking the name of a document in the table on the *Documentation* page opens the document in a book page. The name of the book is displayed on the book page. The book page contains the following elements:
+
+* The contents of the document.
+* A search bar that you can use to search for keywords within the document.
+* A navigation menu that you can use to navigate to other documents in the book.
+* A *Table of contents* that you can use to navigate to other sections of the document.
+* A *Next* button that you can use to navigate to the next sequential document in the book.
+
+You can use the elements on the book page to search, view, and navigate the documentation in the book.
+
+.Prerequisites
+
+* The TechDocs plugin is enabled and configured.
+* Documentation is imported into TechDocs.
+* You have the required roles and permissions to add documentation to TechDocs and to view it.
+* Optional: TechDocs add-ons are installed and configured
+
+.Procedure
+
+. In the {product} navigation menu, click *Docs*.
+. In the *Documentation* table, click the name of the document that you want to view.
+. On the book page, you can do any of the following optional actions:
+* Use installed add-ons that extend the functionality of the default TechDocs plugin.
+* Use the search bar to find keywords within the document.
+* Use any of the following methods to navigate the documentation in the book:
+** Use the *Table of contents* to navigate to any section of the document.
+** Use the navigation menu to navigate to any document in the book.
+** Click *Next* to navigate to the next sequential document in the book.
+
+.Additional resources
+* xref:techdocs-addon[TechDocs add-ons]
diff --git a/modules/using-service-now/ref-supported-servicenow-custom-actions.adoc b/modules/using-service-now/ref-supported-servicenow-custom-actions.adoc
index 9bb34ac420..5aaf71fa4d 100644
--- a/modules/using-service-now/ref-supported-servicenow-custom-actions.adoc
+++ b/modules/using-service-now/ref-supported-servicenow-custom-actions.adoc
@@ -9,8 +9,6 @@ The ServiceNow custom actions enable you to manage records in the {product}. The
* `PATCH`: Updates a resource
* `DELETE`: Deletes a resource
-== ServiceNow custom actions
-
[GET] servicenow:now:table:retrieveRecord::
+
--
@@ -200,7 +198,7 @@ Creates a record in a table in the {product-short}.
|`sysparmInputDisplayValue`
|`boolean`
|Optional
-|Set field values using their display value such as `true` or actual value as `false`. The default value is `false`.
+|Set field values using their display value such as `true` or actual value as `false`. The default value is `false`.
|`sysparmSuppressAutoSysField`
|`boolean`
@@ -414,4 +412,4 @@ Deletes a record from a table in the {product-short}.
|Optional
|Set as `true` to access data across domains if authorized. The default value is `false`.
|===
---
\ No newline at end of file
+--
diff --git a/titles/authentication/master.adoc b/titles/authentication/master.adoc
index 0db027bd0c..15ec5aa224 100644
--- a/titles/authentication/master.adoc
+++ b/titles/authentication/master.adoc
@@ -10,4 +10,3 @@ include::artifacts/attributes.adoc[]
//{abstract}
include::assemblies/assembly-enabling-authentication.adoc[]
-
diff --git a/titles/authorization/master.adoc b/titles/authorization/master.adoc
index 1ac9860e0d..0ebc0a6db2 100644
--- a/titles/authorization/master.adoc
+++ b/titles/authorization/master.adoc
@@ -3,7 +3,7 @@ include::artifacts/attributes.adoc[]
:imagesdir: images
:title: Authorization in {product}
:subtitle: Configuring authorization by using role based access control (RBAC) in {product}
-:abstract: As a {product} platform engineer, you can manage authorizations of other users by using role based access control (RBAC) to meet the specific needs of your organization.
+:abstract: {product} administrators can use role-based access control (RBAC) to manage authorizations of other users.
//[id="{context}"]
//= {title}
diff --git a/titles/configuring/master.adoc b/titles/configuring/master.adoc
index 73ab3513eb..d1a534113a 100644
--- a/titles/configuring/master.adoc
+++ b/titles/configuring/master.adoc
@@ -30,8 +30,10 @@ include::assemblies/assembly-configuring-a-proxy.adoc[leveloffset=+1]
include::modules/installation/proc-configuring-an-rhdh-instance-with-tls-in-kubernetes.adoc[leveloffset=+1]
-include::modules/dynamic-plugins/con-dynamic-plugins-cache.adoc[ leveloffset=+1]
+include::assemblies/dynamic-plugins/assembly-using-the-dynamic-plugins-cache.adoc[ leveloffset=+1]
-include::modules/dynamic-plugins/proc-installing-and-configuring-redis-cache.adoc[leveloffset=+1]
+include::assemblies/assembly-configuring-default-secret-pvc-mounts.adoc[leveloffset=+1]
+
+include::modules/configuring/proc-enabling-the-rhdh-plugin-assets-cache.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/titles/customizing/master.adoc b/titles/customizing/master.adoc
index 3b54976696..dc95144f50 100644
--- a/titles/customizing/master.adoc
+++ b/titles/customizing/master.adoc
@@ -20,14 +20,18 @@ include::modules/customizing/proc-customizing-the-backend-secret.adoc[leveloffse
include::assemblies/assembly-configuring-templates.adoc[leveloffset=+1]
-include::modules/customizing-the-learning-paths/proc-customize-rhdh-learning-paths.adoc[leveloffset=+1]
+include::assemblies/assembly-about-software-catalogs.adoc[leveloffset=+1]
+include::assemblies/assembly-customizing-the-learning-paths.adoc[leveloffset=+1]
+
include::assemblies/assembly-configuring-the-global-header.adoc[leveloffset=+1]
+
include::assemblies/assembly-configuring-a-floating-action-button.adoc[leveloffset=+1]
-include::modules/customizing-the-tech-radar-page/proc-customize-rhdh-tech-radar-page.adoc[leveloffset=+1]
+
+include::assemblies/assembly-customizing-the-tech-radar-page.adoc[leveloffset=+1]
include::assemblies/assembly-customizing-the-appearance.adoc[leveloffset=+1]
@@ -38,3 +42,4 @@ include::assemblies/assembly-customizing-the-homepage.adoc[leveloffset=+1]
include::assemblies/assembly-customizing-the-quick-access-card.adoc[leveloffset=+1]
+include::modules/customizing/proc-customizing-rhdh-metadata-card.adoc[leveloffset=+1]
diff --git a/titles/install-rhdh-air-gapped/master.adoc b/titles/install-rhdh-air-gapped/master.adoc
index 19b1cf2d92..cefdf890b4 100644
--- a/titles/install-rhdh-air-gapped/master.adoc
+++ b/titles/install-rhdh-air-gapped/master.adoc
@@ -1,14 +1,18 @@
[id="title-install-rhdh-air-grapped"]
include::artifacts/attributes.adoc[]
:title: Installing {product} in an air-gapped environment
-:subtitle: Running {product} on {ocp-brand-name} in a networik restricted environment by using either the Operator or Helm chart.
-:abstract: Platform administrators can configure roles, permissions, and other settings to enable other authorized users to deploy an air-gapped {product-short} instance on any supported platform using either the Operator or Helm chart.
+:subtitle: Running {product} on {ocp-brand-name} in a network-restricted environment by using either the Operator or Helm chart
+:abstract: Platform administrators can configure roles, permissions, and other settings to enable other authorized users to deploy an air-gapped {product-short} instance on any supported platform using either the Operator or Helm chart
= {title}
:context: title-install-rhdh-air-grapped
:imagesdir: images
include::modules/installation/con-airgapped-environment.adoc[leveloffset=+1]
+// OCP
include::assemblies/assembly-install-rhdh-airgapped-environment-ocp-operator.adoc[leveloffset=+1]
-include::assemblies/assembly-install-rhdh-airgapped-environment-ocp-helm.adoc[leveloffset=+1]
\ No newline at end of file
+include::assemblies/assembly-install-rhdh-airgapped-environment-ocp-helm.adoc[leveloffset=+1]
+
+// Kubernetes
+include::assemblies/assembly-install-rhdh-airgapped-environment-k8s-helm.adoc[leveloffset=+1]
diff --git a/titles/install-rhdh-eks/master.adoc b/titles/install-rhdh-eks/master.adoc
index bc93f42610..8662156ee3 100644
--- a/titles/install-rhdh-eks/master.adoc
+++ b/titles/install-rhdh-eks/master.adoc
@@ -16,7 +16,7 @@ You can install {product} on {eks-brand-name} ({eks-short}) using one of the fol
* The {product} Helm chart
// Operator method
-include::modules/installation/proc-rhdh-deploy-eks-operator.adoc[leveloffset=+1]
+include::assemblies/assembly-install-rhdh-eks-operator.adoc[leveloffset=+1]
include::modules/installation/proc-deploy-rhdh-instance-eks.adoc[leveloffset=+2]
diff --git a/titles/monitoring-and-logging/master.adoc b/titles/monitoring-and-logging/master.adoc
index 9954c0ebf4..ffb370749d 100644
--- a/titles/monitoring-and-logging/master.adoc
+++ b/titles/monitoring-and-logging/master.adoc
@@ -11,7 +11,7 @@ include::artifacts/attributes.adoc[]
include::assemblies/assembly-rhdh-observability.adoc[leveloffset=+1]
//AWS
-include::modules/observe/proc-rhdh-monitoring-logging-aws.adoc[leveloffset=+1]
+include::assemblies/assembly-monitoring-and-logging-with-aws.adoc[leveloffset=+1]
//AKS
include::assemblies/assembly-monitoring-and-logging-aks.adoc[leveloffset=+1]
diff --git a/titles/rel-notes-rhdh/title-rhdh-release-notes.adoc b/titles/rel-notes-rhdh/title-rhdh-release-notes.adoc
index 5cdd60e777..ba700d5da5 100644
--- a/titles/rel-notes-rhdh/title-rhdh-release-notes.adoc
+++ b/titles/rel-notes-rhdh/title-rhdh-release-notes.adoc
@@ -18,13 +18,14 @@ include::modules/release-notes/ref-release-notes-breaking-changes.adoc[leveloffs
include::modules/release-notes/ref-release-notes-deprecated-functionalities.adoc[leveloffset=+1]
-include::modules/release-notes/ref-release-notes-technology-preview.adoc[leveloffset=+1]
+// include::modules/release-notes/ref-release-notes-technology-preview.adoc[leveloffset=+1]
+include::modules/release-notes/ref-release-notes-developer-preview.adoc[leveloffset=+1]
include::modules/release-notes/ref-release-notes-fixed-issues.adoc[leveloffset=+1]
-include::assemblies/assembly-release-notes-fixed-security-issues.adoc[leveloffset=+1]
+include::modules/release-notes/ref-release-notes-fixed-security-issues.adoc[leveloffset=+1]
include::modules/release-notes/ref-release-notes-known-issues.adoc[leveloffset=+1]
diff --git a/titles/techdocs/docinfo.xml b/titles/techdocs/docinfo.xml
index 42d897b71d..ce010bd06e 100644
--- a/titles/techdocs/docinfo.xml
+++ b/titles/techdocs/docinfo.xml
@@ -1,10 +1,9 @@
TechDocs for {product}{product}{product-version}
-
-
+Use the TechDocs plugin to read and manage your team's technical documentation in one place. Further enhance and customize your TechDocs experience with add-ons.
- Your organization can use the built-in TechDocs plugin for {product} to create, find, and use technical documentation in a central location and in a standardized way. Use supported TechDocs add-ons, or create your own, to further enhance your documentation experience.
+ Your organization can use the built-in TechDocs plugin for {product} to create, find, and use technical documentation in a central location and in a standardized way. Documentation files are stored alongside your code and rendered in the Docs tab. Use supported TechDocs add-ons, or create your own, to further enhance your documentation experience.{company-name} Customer Content Services
diff --git a/titles/techdocs/master.adoc b/titles/techdocs/master.adoc
index cf9a0a8a03..0e4c731330 100644
--- a/titles/techdocs/master.adoc
+++ b/titles/techdocs/master.adoc
@@ -1,8 +1,8 @@
-[id="title-techdocs-rhdh"]
include::artifacts/attributes.adoc[]
+
+:context: customizing-display
+[id="{context}"]
= TechDocs for {product}
-:context: title-techdocs-rhdh
-:imagesdir: images
// about techdocs
include::modules/techdocs/con-techdocs-about.adoc[leveloffset=+1]
@@ -10,6 +10,9 @@ include::modules/techdocs/con-techdocs-about.adoc[leveloffset=+1]
// techdocs configuration
include::assemblies/assembly-configuring-techdocs.adoc[leveloffset=+1]
+// using techdocs
+include::assemblies/assembly-using-techdocs.adoc[leveloffset=+1]
+
// techdocs add-ons
include::assemblies/assembly-techdocs-addons.adoc[leveloffset=+1]
diff --git a/titles/telemetry/master.adoc b/titles/telemetry/master.adoc
index 52a59af949..2608f4895a 100644
--- a/titles/telemetry/master.adoc
+++ b/titles/telemetry/master.adoc
@@ -1,11 +1,20 @@
include::artifacts/attributes.adoc[]
:context: title-telemetry
:imagesdir: images
-:title: Telemetry data collection
-:subtitle: Collecting and analyzing telemetry data to enhance {product} experience
-:abstract: As a {product} administrator, you can collect and analyze telemetry data to enhance your {product} experience.
+:title: Telemetry data collection and analysis
+:subtitle: Collecting and analyzing web analytics and system observability data to enhance {product} experience
+:abstract: As a {product} administrator, you can collect and analyze two distinct types of telemetry data: web analytics using Segment and system observability using OpenTelemetry, to enhance the {product} experience.
= {title}
-//Telemetry data collection
-include::assemblies/assembly-rhdh-telemetry.adoc[leveloffset=+1]
+//Telemetry data collection and analysis
+include::modules/analytics/con-telemetry-data-collection-and-analysis.adoc[leveloffset=+1]
+
+// disabling telemetry
+include::assemblies/assembly-disabling-telemetry-data-collection.adoc[leveloffset=+1]
+
+// enabling telemetry
+include::assemblies/assembly-enabling-telemetry-data-collection.adoc[leveloffset=+1]
+
+// customizing segment source
+include::assemblies/assembly-customizing-segment-source.adoc[leveloffset=+1]
\ No newline at end of file