diff --git a/enterprise_versioned_docs/version-3.2.2/administration/active-and-renew-license.md b/enterprise_versioned_docs/version-3.2.2/administration/active-and-renew-license.md
new file mode 100644
index 00000000..73d2720c
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/administration/active-and-renew-license.md
@@ -0,0 +1,4 @@
+---
+title: Activate and Renew License
+slug: /administration/active-and-renew-license
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/administration/manage-token-2-use-api7-ee-api.md b/enterprise_versioned_docs/version-3.2.2/administration/manage-token-2-use-api7-ee-api.md
new file mode 100644
index 00000000..684cac88
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/administration/manage-token-2-use-api7-ee-api.md
@@ -0,0 +1,4 @@
+---
+title: Manage Token to Use API7 Enterprise Edition API
+slug: /administration/manage-token-2-use-api7-ee-api
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/administration/track-user-actions-for-security-audition.md b/enterprise_versioned_docs/version-3.2.2/administration/track-user-actions-for-security-audition.md
new file mode 100644
index 00000000..283c5c1a
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/administration/track-user-actions-for-security-audition.md
@@ -0,0 +1,4 @@
+---
+title: Track User Actions for Security Auditing
+slug: /administration/track-user-actions-for-security-audition
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-consumption/allow-list-based-accesss-control-of-consumers.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-consumption/allow-list-based-accesss-control-of-consumers.md
new file mode 100644
index 00000000..2428afda
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-consumption/allow-list-based-accesss-control-of-consumers.md
@@ -0,0 +1,4 @@
+---
+title: Allow List-Based Access Control for Consumers
+slug: /api-consumption/allow-list-based-accesss-control-of-consumers
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-consumption/manage-api-consumer-credentials.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-consumption/manage-api-consumer-credentials.md
new file mode 100644
index 00000000..3b2f954f
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-consumption/manage-api-consumer-credentials.md
@@ -0,0 +1,4 @@
+---
+title: Manage API Consumer Credentials
+slug: /api-consumption/manage-api-consumer-credentials
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-consumption/publish-api-4-discovery-and-integration.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-consumption/publish-api-4-discovery-and-integration.md
new file mode 100644
index 00000000..8c2a48d2
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-consumption/publish-api-4-discovery-and-integration.md
@@ -0,0 +1,4 @@
+---
+title: Publish Your API for Discovery and Integration
+slug: /api-consumption/publish-api-4-discovery-and-integration
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-design/export-openapi-sepc.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-design/export-openapi-sepc.md
new file mode 100644
index 00000000..ada1a173
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-design/export-openapi-sepc.md
@@ -0,0 +1,4 @@
+---
+title: Export OpenAPI Specification
+slug: /api-design/export-openapi-sepc
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-design/plan-api-endpoints.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-design/plan-api-endpoints.md
new file mode 100644
index 00000000..1ebbd49f
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-design/plan-api-endpoints.md
@@ -0,0 +1,4 @@
+---
+title: Plan Your API Endpoints
+slug: /api-design/plan-api-endpoints
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/apply-api-rate-limit-policies.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/apply-api-rate-limit-policies.md
new file mode 100644
index 00000000..f53405b0
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/apply-api-rate-limit-policies.md
@@ -0,0 +1,4 @@
+---
+title: Apply API Rate Limit Policies
+slug: /api-publish/apply-api-rate-limit-policies
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/configure-api-upstream.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/configure-api-upstream.md
new file mode 100644
index 00000000..ce7a5db1
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/configure-api-upstream.md
@@ -0,0 +1,4 @@
+---
+title: Configure API Upstream
+slug: /api-publish/configure-api-upstream
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/import-api-define-of-service.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/import-api-define-of-service.md
new file mode 100644
index 00000000..2187ccd5
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/import-api-define-of-service.md
@@ -0,0 +1,4 @@
+---
+title: Import API Definition of Your Service
+slug: /api-publish/import-api-define-of-service
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/set-up-api-auth.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/set-up-api-auth.md
new file mode 100644
index 00000000..5022bf8a
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/set-up-api-auth.md
@@ -0,0 +1,4 @@
+---
+title: Set Up API Authentication
+slug: /api-publish/set-up-api-auth
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/transform-api-req-2-simplify-integration.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/transform-api-req-2-simplify-integration.md
new file mode 100644
index 00000000..e0983437
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-publish/transform-api-req-2-simplify-integration.md
@@ -0,0 +1,4 @@
+---
+title: Transform API Requests to Simplify Integration
+slug: /api-publish/transform-api-req-2-simplify-integration
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-runtime/log-api-traffic.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-runtime/log-api-traffic.md
new file mode 100644
index 00000000..756d835c
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-runtime/log-api-traffic.md
@@ -0,0 +1,4 @@
+---
+title: Log API Traffic
+slug: /api-runtime/log-api-traffic
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-runtime/monitor-api-metrics.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-runtime/monitor-api-metrics.md
new file mode 100644
index 00000000..1f022f3c
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-runtime/monitor-api-metrics.md
@@ -0,0 +1,4 @@
+---
+title: Monitor API Metrics
+slug: /api-runtime/monitor-api-metrics
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-runtime/trigger-alerts-of-unusual-api-activities.md b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-runtime/trigger-alerts-of-unusual-api-activities.md
new file mode 100644
index 00000000..e98627d5
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/api-full-lifecycle-management/api-runtime/trigger-alerts-of-unusual-api-activities.md
@@ -0,0 +1,4 @@
+---
+title: Trigger Alerts for Unusual API Activities
+slug: /api-runtime/trigger-alerts-of-unusual-api-activities
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/consumers.md b/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/consumers.md
deleted file mode 100644
index 48de499c..00000000
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/consumers.md
+++ /dev/null
@@ -1,118 +0,0 @@
----
-title: Consumers
-slug: /key-concepts/consumers
----
-
-In this document, you will learn the basic concepts of consumers in APISIX and why you need them. You will be introduced to a few relevant concepts, including how to pass consumer information to upstream, consumer access restriction, as well as consumer authentication and authorization.
-
-Explore additional resources at the end of the document for more information on related topics.
-
-## Overview
-
-In APISIX, a _consumer_ object represents a user, application, or host that sends requests to the API gateway and consumes backend services. It is used in conjunction with the authentication system; that is, every consumer should be configured with at least one authentication plugin.
-
-Consumer objects come in handy if you have different consumers sending requests to your system, and you need APISIX to perform certain functions, such as rate limiting, based on consumers. These functionalities are provided by APISIX plugins configured in consumers.
-
-The following diagram illustrates an example of APISIX with one route and two consumers, where one consumer, `FetchBot`, is a data fetching bot, and the other consumer, `JohnDoe`, is a human end user. Plugin `key-auth` is configured on route and consumers, so that requests will be authenticated with API keys. To access the internal service, `FetchBot` would send its requests with `bot-key` and `JohnDoe` would send his request with `john-key`.
-
-
-
-

-
-
-
-This configuration ensures that only authenticated requests can interact with the internal service exposed on `/internal`. If a request is sent to APISIX:
-
-* without any key or with a wrong key, the request is rejected.
-* with `bot-key`, the request is authenticated and seen as sent by `FetchBot` to fetch data from the internal service. The rate limiting plugin `limit-count` on the consumer takes effect, limiting the number of requests within a 5-second window to 2. If the rate limiting threshold has not been met, the request is forwarded to the upstream service; otherwise, it is rejected.
-* with `john-key`, the request is authenticated and seen as sent by `JohnDoe`, subsequently being forwarded to the upstream service.
-
-Note that the authentication plugin is executed before the rate limiting plugin in this scenario, in accordance with the [plugins execution phases](./plugins.md#plugins-execution-lifecycle).
-
-## Passing Consumer Information to Upstream
-
-For certain use cases, such as logging, analytics, and auditing, you might want to pass consumer information to upstream services. Consumer information, by default, is not exposed to upstream; however, you can use `proxy-rewrite` plugin to include the needed information in the header:
-
-```json
-{
- "plugins":{
- ...,
- "proxy-rewrite":{
- "headers":{
- "set":{
- "X-Consumer-Name":"$consumer_name"
- }
- }
- }
- }
-}
-```
-
-## Consumer Access Restriction
-
-You can control request access to your API by imposing restrictions based on consumer name, HTTP methods, or other parameters in the `consumer-restriction` plugin.
-
-For example, if you want to blacklist `FetchBot` from accessing your internal service without changing any consumers configuration in the example from [overview](#overview), you can update the plugin's configuration in route to the following:
-
-```json
-{
- "plugins":{
- "key-auth":{},
- "consumer-restriction":{
- "blacklist":["FetchBot"]
- }
- }
-}
-```
-
-Or, if you want to strictly allow `FetchBot`'s access by HTTP GET method, you can update the plugin's configuration (in either the route or the consumer) to the following:
-
-```json
-{
- "plugins":{
- ...,
- "consumer-restriction":{
- "allowed_by_methods":[
- {
- "user":"FetchBot",
- "methods":["GET"]
- }
- ]
- }
- }
-}
-```
-
-The `consumer-restriction` plugin can also be used with [routes](./routes.md), [services](./services.md), and [plugin global rules](./plugin-global-rules.md). For more details on the plugin usage, please refer to the plugin reference guide (coming soon).
-
-[//]:
-
-## Authentication & Authorization
-
-There are two main design patterns for building authentication and authorization in an APISIX-based architecture.
-
-The first and most commonly adopted approach is to authenticate and authorize requests through a third-party [identity provider (IdP)](https://en.wikipedia.org/wiki/Identity_provider), such as [Keycloak](https://www.keycloak.org):
-
-
-

-
-
-
-In some environments, a request might need to go through more than one IdP before it can be forwarded to the upstream service. In such cases, you can configure multiple authentication plugins, each corresponding to an IdP, on one consumer; only when all IdPs have granted access to a request will APISIX show success response.
-
-With multiple authentication plugins in place, the [plugins order of execution](./plugins.md#plugins-execution-order) is determined by the plugin's priority, which can be overridden with `_meta.priority`.
-
-The second and a more basic approach is to perform authentication and authorization on the API gateway itself, using `key-auth`, `basic-auth`, `jwt-auth`, `hmac-auth` plugins:
-
-
-

-
-
-
-For details about how to configure authentication and authorization for your specific needs, please refer to the authentication chapter in How-To Guides (coming soon).
-
-## Additional Resource(s)
-
-* Getting Started - [Configure Key Authentication](../../getting-started/key-authentication.md)
-[//]:
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/plugin-global-rules.md b/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/plugin-global-rules.md
deleted file mode 100644
index f671a961..00000000
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/plugin-global-rules.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: Plugin Global Rules
-slug: /key-concepts/plugin-global-rules
----
-
-In this document, you will learn the basic concept of plugin global rules in APISIX and why you may need them.
-
-Explore additional resources at the end of the document for more information on related topics.
-
-## Overview
-
-In APISIX, a _global rule_ object is used to create [plugins](./plugins.md) that are triggered on every incoming request and executed before other plugins locally bound to objects, such as [routes](./routes.md), [services](./services.md), and [consumers](./consumers.md). Certain plugins, such as rate limiting and observability plugins, are frequently enabled globally in order to provide consistent and comprehensive protection for APIs.
-
-The following diagram illustrates an example of enabling key authentication plugin globally for all incoming requests, where `key-auth` plugin is configured in both a global rule and a consumer. The `proxy-rewrite` plugin is configured on a route to modify the request's [HTTP header](https://developer.mozilla.org/en-US/docs/Glossary/HTTP_header), for demonstrating [plugins execution order](./plugins.md#plugins-execution-order):
-
-
-
-

-
-
-
-This configuration ensures that only the authenticated requests are allowed to interact with the upstream service. If a request is sent to APISIX:
-
-* without any key or with a wrong key, the request is rejected.
-* with `global-key` but to a non-existent route, the request is authenticated but APISIX returns an error warning users that the route is not found.
-* with `global-key` to an existing route, the request is first authenticated, then the header of the request is modified by the plugin on the route, and finally the request is forwarded to the upstream service.
-
-The example above used two different plugins in a global rule and a route. If the same plugin is configured in both objects, both instances of the plugin will be [executed sequentially](./plugins.md#plugins-execution-order), rather than overwriting each other.
-
-## Additional Resource(s)
-
-* Key Concepts
- * [Plugins](./plugins.md)
- * [Consumers](./consumers.md)
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/plugin-metadata.md b/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/plugin-metadata.md
deleted file mode 100644
index b92bb418..00000000
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/plugin-metadata.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: Plugin Metadata
-slug: /key-concepts/plugin-metadata
----
-
-In this document, you will learn the basic concept of plugin metadata in APISIX and why you may need them.
-
-Explore additional resources at the end of the document for more information on related topics.
-
-## Overview
-
-In APISIX, a _plugin metadata_ object is used to configure the common metadata field(s) of all plugin instances sharing the same plugin name. It is useful when a plugin is enabled across multiple objects and requires a universal update to their metadata fields.
-
-The following diagram illustrates the concept of plugin metadata using two instances of `syslog` plugins on two different routes, as well as a plugin metadata object setting a global `log_format` for the `syslog` plugin:
-
-
-
-
-

-
-
-
-
-Without otherwise specified, the `log_format` on plugin metadata object should apply the same log format uniformly to both `syslog` plugins. However, since the `syslog` plugin on the `/order` route has a different `log_format`, requests visiting this route will generate logs in the `log_format` specified by the plugin in route.
-
-In general, if a field of a plugin is defined in both the plugin metadata and another object, such as a route, the definition on the other object **takes precedence** over the global definition in plugin metadata to provide a more granular level of control.
-
-Plugin metadata objects should only be used for plugins that have metadata fields. For more details on which plugins have metadata fields, please refer to the plugin reference guide (coming soon).
-
-[//]:
-
-## Additional Resource(s)
-
-* Key Concepts - [Plugins](./plugins.md)
-
-[//]:
-[//]:
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/protos.md b/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/protos.md
deleted file mode 100644
index 89f049d4..00000000
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/protos.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: Protos
-slug: /key-concepts/protos
----
-
-In this document, you will learn the basic concept of protos in APISIX and why you may need them.
-
-Explore additional resources at the end of the document for more information on related topics.
-
-## Overview
-
-In APISIX, _proto_ objects contain [protocol buffer (protobuf)](https://protobuf.dev) definitions which define the service interface and message types used in communication with upstream [gRPC](https://grpc.io) services.
-
-The following diagram illustrates the concept of a proto object using an example of APISIX transcoding between HTTP and gRPC. In this example, the route `/grpc-echo` is associated with the plugin `grpc-transcode` and a proto object:
-
-
-

-
-
-
-The gRPC server is registered with `EchoService` defined in `echo.proto` file, which echoes back string input from incoming requests.
-
-To enable gRPC communication between APISIX and server, the protocol buffer definitions specified in the `echo.proto` file are added to the proto object in APISIX. This ensures that APISIX and the gRPC server agree on the specifications of data exchange, allowing APISIX to effectively communicate with the gRPC server and relay the responses back to the client over HTTP.
-
-To learn more about how to use `grpc-transcode` for protocol transcoding, see [Transcode HTTP to gRPC](../../how-to-guide/transformation/transcode-http-to-grpc.md).
-
-[//]:
-
-## Additional Resource(s)
-
-* Key Concepts - [Routes](./routes.md)
-* How-To Guide - [Transcode HTTP to gRPC](../../how-to-guide/transformation/transcode-http-to-grpc.md)
-
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/routes.md b/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/routes.md
deleted file mode 100644
index 8ffd9a78..00000000
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/routes.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-title: Routes
-slug: /key-concepts/routes
----
-
-In this document, you will learn the basic concept of routes in APISIX, different routing options APISIX offers, as well as drawbacks and solutions to repetitive route configurations.
-
-Explore additional resources at the end of the document for more information on related topics.
-
-## Overview
-
-_Routes_ define paths to upstream services. In APISIX, a route is responsible for matching client requests based on configured rules, loading and executing the corresponding plugins, and forwarding requests to the specified upstream endpoints.
-
-A simple route can be set up with a path-matching URI and a corresponding upstream address. The diagram below shows an example of users sending two HTTP requests to the APISIX API gateway, which are forwarded accordingly per the configured rules in routes:
-
-
-
-
-

-
-
-
-
-Routes are often configured with plugins as well. For example, [configuring the rate-limit plugin in a route](../../getting-started/rate-limiting.md) will enable rate-limiting effects.
-
-## Routing Options
-
-APISIX offers three HTTP routing options:
-
-1. `radixtree_host_uri` routes requests by hosts and URI paths. It can be used to route north-south traffic between clients and servers.
-
-2. `radixtree_uri` routes requests by URI paths. It can be used to route east-west traffic, such as between microservices.
-
-3. `radixtree_uri_with_parameter` enhances on `radixtree_uri` to support the use of parameter in path matching.
-
-These routing options can be configured in `conf/config.yaml` under `apisix.router.http`.
-
-## Routes, Upstreams, and Services
-
-While routes are essential in defining the paths of traffic flows, there are drawbacks to repetitive route configurations (i.e. hard coding **the same upstream addresses or plugin names** for a group of routes). During the time of updates, the repetitive field(s) of these routes will need to be traversed and updated one by one. Configurations like this increase a lot of maintenance costs as a result, especially in large-scale systems with many routes.
-
-To address this issue, [Upstreams](./upstreams.md) and [Services](./services.md) were designed to abstract away repetitive information and reduce redundancies, following the [DRY principle](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself).
-
-## Additional Resource(s)
-
-* Getting Started - [Configure Routes](../../getting-started/configure-routes.md)
-
-[//]:
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/secrets.md b/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/secrets.md
deleted file mode 100644
index cf88729a..00000000
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/secrets.md
+++ /dev/null
@@ -1,41 +0,0 @@
----
-title: Secrets
-slug: /key-concepts/secrets
----
-
-In this document, you will learn the basic concept of secrets in APISIX and why you may need them.
-
-Explore additional resources at the end of the document for more information on related topics.
-
-## Overview
-
-In APISIX, a *secret* object is used to set up integration with an external secret manager, so that APISIX can establish connections and fetch secrets from the secret manager dynamically at runtime.
-
-The following diagram illustrates the concept of a secret object using an example, where [key authentication](../../getting-started/key-authentication.md) is enabled for a user, John Doe, and user credentials are stored in an [HashiCorp Vault](https://www.vaultproject.io) server:
-
-
-

-
-
-As demonstrated, when APISIX is used in conjunction with an external secret manager, the field for secret is defined as a variable starting with a fixed prefix `$secret://`, appended with the name of the secret manager, APISIX secret object ID, username, and other details.
-
-Specifically, if Vault is used as the secret manager, the APISIX secret object should specify:
-
-* `uri`: location where Vault server is hosted
-* `prefix`: path prefix corresponding to a secret engine that Vault should route traffic to
-* `token`: token for APISIX to authenticate to Vault and establish connection
-
-These configurations ensure that John Doe can send requests to APISIX and access the back-end service with the correct key. Requests from unauthenticated users are rejected by APISIX.
-
-For more details on the secret object usage, please refer to the Admin API reference (coming soon).
-
-[//]:
-
-## Additional Resource(s)
-
-* Getting Started - [Key Authentication](../../getting-started/key-authentication.md)
-* Key Concepts
- * [Plugins](./plugins.md)
- * [Consumers](./consumers.md)
-
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/services.md b/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/services.md
deleted file mode 100644
index 42fd68d0..00000000
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/services.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: Services
-slug: /key-concepts/services
----
-
-In this document, you will learn the basic concept of _services_ in APISIX and the advantages of using services.
-
-Explore additional resources at the end for more information on related topics.
-
-## Overview
-
-A _service_ object in APISIX is an abstraction of a backend application providing logically related functionalities. The relationship between a service and routes is usually one-to-many (1:N).
-
-The following diagram illustrates an example of a service object used in architecting a data analytics (`da`) backend at Foodbar Company (a fictional company), where there are two routes with distinctive configurations - one for getting data ([HTTP GET](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/GET)) and the other one for uploading data ([HTTP POST](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/POST)):
-
-
-
-
-

-
-
-
-
-Note that the [rate-limiting](../../getting-started/rate-limiting.md) plugin `limit-count` is configured once on the service object, regulating incoming client requests from the two routes. Similarly, the upstream address is also configured only once on the [upstream](./upstreams.md) object. While plugins and upstreams can also be configured in routes individually (and repetitively) to serve the same purpose, it is advised against adopting this approach, as things quickly become hard to manage when the system grows. Using upstreams and services help reduce the risk of data anomalies and minimize operational costs.
-
-For simplicity, the example above only pointed the traffic to one upstream node. You can add more upstream nodes, when needed, to [enable load balancing](../../getting-started/load-balancing.md), maintaining a smooth operation and response for users and avoiding a single point of failure in the architecture.
-
-## Additional Resource(s)
-
-* Getting Started
- * [Configure Routes](../../getting-started/configure-routes.md)
- * [Load Balancing](../../getting-started/load-balancing.md)
-* Key Concepts
- * [Routes](./routes.md)
- * [Upstreams](./upstreams.md)
- * [Plugins](./plugins.md)
-
-[//]:
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/ssl-certificates.md b/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/ssl-certificates.md
deleted file mode 100644
index 7d6dd652..00000000
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/ssl-certificates.md
+++ /dev/null
@@ -1,153 +0,0 @@
----
-title: SSL Certificates
-slug: /key-concepts/ssl-certificates
----
-
-In this document, you will learn the basic concept of SSL certificate objects in APISIX and scenarios where you may need them, including configuring TLS or mTLS between client applications, APISIX, and upstream servers. You will go over the basics of SSL/TLS at the beginning to help further understanding when to use SSL certificate objects in APISIX.
-
-Explore additional resources at the end of the document for more information on related topics.
-
-## Overview
-
-_TLS (Transport Layer Security)_, being the successor to SSL (Secure Sockets Layer) protocol, is a cryptographic protocol designed to secure communication between two parties, such as a web browser and a web server. It is implemented on top of an existing protocol, such as HTTP or TCP, to provide an additional layer of security by establishing a connection through a TLS handshake and encrypting data transmission.
-
-The following is a high-level overview of the **one-way TLS handshake** in [TLS v1.2](https://www.rfc-editor.org/rfc/rfc5246) and [TLS v1.3](https://www.rfc-editor.org/rfc/rfc8446)—the two most commonly used TLS versions:
-
-
-

-
-
-During this process, the server authenticates itself to the client by presenting its certificate. The client verifies the certificate to ensure that it is valid and issued by a trusted authority. Once the certificate has been verified, the client and server agree on a shared secret, which is used to encrypt and decrypt the application data.
-
-APISIX also supports _mutual TLS_, or _mTLS_, where client also authenticates itself to the server by presenting its certificate, effectively creating a two-way TLS connection. This ensures that both parties are authenticated and helps prevent network attacks like [man-in-the-middle](https://en.wikipedia.org/wiki/Man-in-the-middle_attack).
-
-To enable TLS or mTLS in your system with APISIX, you should generate and configure certificates in the appropriate places, such as on client applications, APISIX, and/or upstream servers. For configuration on the APISIX side, an SSL certificate object may be required, depending on the segment of communication you want to secure:
-
-
-
-| | **TLS** | **mTLS** |
-|----------------------------------|---------------|----------|
-| **Client Application –– APISIX** | Required | Required |
-| **APISIX –– Upstream** | Not Required | Optional |
-
-
-
-You will learn about use cases and non-use cases of SSL objects for those scenarios.
-
-## TLS Between Client Applications and APISIX
-
-It is common practice to enforce TLS between client applications and APISIX as data transmission in this segment is typically over the public internet and therefore, is at a higher risk of being eavesdropped.
-
-The following diagram illustrates the usage of an SSL object in implementing TLS over HTTP (also known as HTTPS) between client applications and APISIX, where APISIX is deployed at an arbitrary IP address `132.69.xxx.xxx` behind the domain name `foodservice.com` and acts as a gatekeeper between public traffic and internal services:
-
-
-
-
-

-
-
-
-
-Here are the key steps that took place in the illustration:
-
-1. The client application initiates a request to `https://foodservice.com`.
-
-2. The request first goes through a DNS server, which resolves the domain name to an IP address and returns the IP address to the client application.
-
-3. The client application sends the request for `foodservice.com` to its resolved IP address, during which process, client application performs a [TLS handshake](#overview) with APISIX, where APISIX sends its certificate `server.crt` to client for authentication.
-
-4. As `foodservice.com` is included in the SNI list of the APISIX SSL object, the TLS handshake shall succeed. The communication between the client application and APISIX is now encrypted with TLS.
-
-5. APISIX routes and forwards the request to the corresponding upstream services over HTTP. Note that the upstream services are exposed on the default port 80 and TLS is terminated at APISIX in this example.
-
-[//]:
-[//]:
-
-For detailed instructions on how to configure HTTPS between client and APISIX, please refer to the [how-to guide](../how-to-guide/traffic-management/tls-and-mtls/configure-https-between-client-and-apisix).
-
-## TLS Between APISIX and Upstreams
-
-Upstream services may require TLS in cases where the traffic between the API gateway and upstreams is not secure or private. In a one-way TLS setup between APISIX and upstreams, upstream servers are responsible for presenting the certificate and key. On the APISIX side, you only need to configure [upstreams](./upstreams.md) to use HTTPS scheme and port 443 (or other designated port).
-
-For detailed instructions on how to configure TLS between APISIX and upstreams, please refer to the [how-to guide](../how-to-guide/traffic-management/tls-and-mtls/configure-upstream-https).
-
-## mTLS Between Client Applications and APISIX
-
-In closed systems where general access to back-end services is restricted, it is important for the server to verify the identity of the client to ensure that only authenticated and authorized clients are allowed to access the back-end services. One way to achieve this is to configure mTLS between the client and server. With mTLS, the client presents a certificate to the server during the TLS handshake process, and the server uses the certificate to verify the identity of the client. If the client is not authenticated, the server will reject the request.
-
-To configure mTLS between client applications and APISIX, in addition to the configuration required for TLS, you should also:
-
-1. Generate and configure certificates and keys on the client applications.
-
-2. Add the [Certificate Authority (CA)](https://en.wikipedia.org/wiki/Certificate_authority) certificate to the `client.ca` field in APISIX's SSL object, such as the following:
-
- ```json
- {
- "type": "server",
- "sni": "foodservice.com",
- "cert": "",
- "key": "",
- # highlight-start
- "client": {
- "ca": ""
- }
- # highlight-end
- }
- ```
-
- where the CA certificate is used to verify the digital signatures on client certificates issued by the CA, thereby verifying the identity of client applications.
-
-For detailed instructions on how to configure mTLS between client and APISIX, please refer to the [how-to guide](../how-to-guide/traffic-management/tls-and-mtls/configure-mtls-between-client-and-apisix).
-
-[//]:
-[//]:
-
-## mTLS Between APISIX and Upstreams
-
-mTLS between an API gateway and its upstream services is typically implemented in high-security environments by organizations, such as financial institutions, who need to stay compliant with relevant security standards and regulations.
-
-In APISIX, whether to use an SSL object in configuring mTLS between APISIX and its upstream services is determined by whether the configuration will be repetitive.
-
-If the certificate is valid for only one domain, you can choose to directly configure the certificate and key in the upstream object:
-
-
-

-
-
-
-
-When a certificate, such as a wildcard certificate, is valid for multiple domains, it is recommended to create a single SSL object to store the certificate and key and avoid the repetitive TLS configurations on upstreams:
-
-
-
-
-

-
-
-
-
-For detailed instructions on how to configure mTLS between APISIX and upstreams, please refer to the how-to guide (coming soon).
-
-## Additional Resource(s)
-
-* Key Concepts
-
- * [Routes](./routes.md)
- * [Upstreams](./upstreams.md)
-
-[//]:
-[//]:
-[//]:
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/upstreams.md b/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/upstreams.md
deleted file mode 100644
index 970f6686..00000000
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/upstreams.md
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: Upstreams
-slug: /key-concepts/upstreams
----
-
-In this document, you will learn the basic concept of an upstream object in API7 Enterprise Edition and why you would want to use it. You will be introduced to a few relevant features, including load balancing, service discovery, and upstream health checking.
-
-Explore additional resources at the end for more information on related topics.
-
-## Overview
-
-An _upstream_ object in API7 Enterprise Edition is a logical abstraction of a set containing one or more upstream addresses. It is required in [services](../key-concepts/services.md) to specify **where** requests flow to and **how** they are distributed.
-
-Here is an example of such a configuration in service routes, where the same upstream address is repeated across three different service routes:
-
-
-
-
-

-
-
-
-
-As you can probably see, large-scale systems with many services would benefit significantly from configuring identical groups of upstream addresses in upstream objects, reducing redundant information and operational costs.
-
-## Load Balancing
-
-An important use case of upstreams is to help [enable load balancing](../../getting-started/load-balancing.md) - that is, outlining where client requests are forwarded to and how they are distributed among back-end replicas.
-
-In upstreams, there are four load-balancing algorithms available to choose from:
-
-* `Round Robin` - weighted round robin
-* `Consistent Hash` - consistent hashing
-* `Exponentially Weighted Moving Average(EWMA)` - exponentially weighted moving average
-* `Least Connection` - least connections
-
-For detailed instructions and explanation about load balancing in API7 Enterprise Edition, please refer to the load balancing how-to guide and API Reference (coming soon).
-
-[//]:
-[//]:
-
-## Service Discovery
-
-While it is straightforward to figure upstream addresses statically, in microservice-based architectures, upstream addresses are often dynamically assigned and therefore, changed, during autoscaling, failures, and updates. Static configurations are less than ideal in this case.
-
-Service discovery comes to rescue. It describes the process of automatically detecting the available upstream services, keeping their addresses in a database (called a service registry) for others to reference. That way, an API gateway can always fetch the latest list of upstream addresses through the registry, ensuring all requests are forwarded to healthy upstream nodes.
-
-API7 Enterprise Edition supports integrations with many service registries, such as Consul, Eureka, Nacos, Kubernetes service discovery, and more.
-
-For more details about how to integrate with third-party service registries, please see Service Discovery (coming soon).
-
-[//]:
-
-## Upstream Health Checking
-
-API7 Enterprise Edition provides active and passive health checking options to probe if upstream nodes are online (a.k.a. healthy). Unhealthy upstream nodes will be ignored until they recover and are deemed healthy again.
-
-Upstream health checking can be configured in the `checks` parameter in an upstream object.
-
-More details about how to configure upstream health checking will be provided in Active and Passive Health Checking (coming soon).
-
-[//]:
-[//]:
-
-## Additional Resource(s)
-
-* Getting Started - [Load Balancing](../../getting-started/load-balancing.md)
-[//]:
-[//]:
-[//]:
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/enterprise-edition/features/audit.md b/enterprise_versioned_docs/version-3.2.2/enterprise-edition/features/audit.md
deleted file mode 100644
index 37859229..00000000
--- a/enterprise_versioned_docs/version-3.2.2/enterprise-edition/features/audit.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: Audit
-slug: /enterprise-edition/audit
----
-
-Coming soon.
diff --git a/enterprise_versioned_docs/version-3.2.2/enterprise-edition/features/rbac.md b/enterprise_versioned_docs/version-3.2.2/enterprise-edition/features/rbac.md
deleted file mode 100644
index 9da97349..00000000
--- a/enterprise_versioned_docs/version-3.2.2/enterprise-edition/features/rbac.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: RBAC
-slug: /enterprise-edition/rabc
----
-
-Coming soon.
diff --git a/enterprise_versioned_docs/version-3.2.2/enterprise-edition/install-enterprise-trial.md b/enterprise_versioned_docs/version-3.2.2/enterprise-edition/install-enterprise-trial.md
deleted file mode 100644
index 22fb4334..00000000
--- a/enterprise_versioned_docs/version-3.2.2/enterprise-edition/install-enterprise-trial.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Install API7 Enterprise
-slug: /enterprise-edition/install
----
-
-You can install API7 Enterprise with a trial option to start for free.
-
-Visit [API7 Enterprise](https://api7.ai/try?product=enterprise) and complete the download form to obtain a trial license, which should be sent to the email you provided.
-
-Proceeding with the download, you will be redirected to the installation instructions.
diff --git a/enterprise_versioned_docs/version-3.2.2/getting-started/configure-routes.md b/enterprise_versioned_docs/version-3.2.2/getting-started/configure-routes.md
deleted file mode 100644
index fbcdfc8e..00000000
--- a/enterprise_versioned_docs/version-3.2.2/getting-started/configure-routes.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-title: Configure Routes
-slug: /getting-started/configure-routes
----
-
-Apache APISIX provides flexible gateway management capabilities based on routes, where routing paths and targets are defined for requests.
-
-This tutorial guides you on how to create a route and validate it. You will complete the following steps:
-
-1. Create a route with a sample _upstream_ that points to [httpbin.org](http://httpbin.org).
-2. Use _cURL_ to send a test request to see how APISIX proxies and forwards the request.
-
-## What Is a Route
-
-A _route_ is a routing path to upstream targets. In [Apache APISIX](https://api7.ai/apisix), routes are responsible for matching client's requests based on defined rules, loading and executing the corresponding plugins, as well as forwarding requests to the specified upstream services.
-
-In APISIX, a simple route can be set up with a path-matching URI and a corresponding upstream address.
-
-## What Is an Upstream
-
-An _upstream_ is a set of target nodes with the same work. It defines a virtual host abstraction that performs load balancing on a given set of service nodes according to the configured rules.
-
-## Prerequisite(s)
-
-1. Complete [Get APISIX](./) to install APISIX.
-
-## Create a Route
-
-In this section, you will create a route that forwards client requests to [httpbin.org](http://httpbin.org), a public HTTP request and response service.
-
-The following command creates a route, which should forward all requests sent to `http://127.0.0.1:9080/ip` to [httpbin.org/ip](http://httpbin.org/ip):
-
-[//]:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
-{
- "id": "getting-started-ip",
- "uri": "/ip",
- "upstream": {
- "type": "roundrobin",
- "nodes": {
- "httpbin.org:80": 1
- }
- }
-}'
-```
-
-You will receive an `HTTP/1.1 201 OK` response if the route was created successfully.
-
-## Validate
-
-```shell
-curl "http://127.0.0.1:9080/ip"
-```
-
-The expected response is similar to the following:
-
-```text
-{
- "origin": "183.94.122.205"
-}
-```
-
-## What's Next
-
-This tutorial creates a route with only one target node. In the next tutorial, you will learn how to configure load balancing with multiple target nodes.
diff --git a/enterprise_versioned_docs/version-3.2.2/getting-started/get-apisix.md b/enterprise_versioned_docs/version-3.2.2/getting-started/get-apisix.md
deleted file mode 100644
index c0e6b56f..00000000
--- a/enterprise_versioned_docs/version-3.2.2/getting-started/get-apisix.md
+++ /dev/null
@@ -1,62 +0,0 @@
----
-title: Get APISIX
-slug: /getting-started/
----
-
-Apache APISIX is a dynamic, real-time, and high-performance API Gateway. It is a [top-level project](https://projects.apache.org/project.html?apisix) of the Apache Software Foundation.
-
-You can use APISIX API Gateway as a traffic entrance to process all business data. It offers features including dynamic routing, dynamic upstream, dynamic certificates, A/B testing, canary release, blue-green deployment, limit rate, defense against malicious attacks, metrics, monitoring alarms, service observability, service governance, and more.
-
-This tutorial uses a script to quickly install [Apache APISIX](https://api7.ai/apisix) in your local environment and verifies the installation through the Admin API. You can also use [API7 Cloud](https://api7.ai/cloud), a Cloud-host service, to manage APISIX.
-
-## Prerequisite(s)
-
-* Install [Docker](https://docs.docker.com/get-docker/) to be used in the quickstart script to create containerized **etcd** and **APISIX**.
-* Install [cURL](https://curl.se/) to be used in the quickstart script and to send requests to APISIX for validation.
-
-## Get APISIX
-
-:::caution
-
-To provide a better experience in this tutorial, the requirement of Admin API key is switched off by default. Please turn on the API key requirement of Admin API in the production environment.
-
-:::
-
-APISIX can be easily installed and started with the quickstart script:
-
-```shell
-curl -sL https://run.api7.ai/apisix/quickstart-v3.2.0 | sh
-```
-
-The script starts two Docker containers, `apisix-quickstart` and `etcd-quickstart` in the `apisix-quickstart-net` Docker network, where etcd is used to store APISIX configurations.
-
-You should see the following message once APISIX is ready:
-
-```text
-✔ APISIX is ready!
-```
-
-## Validate
-
-Once APISIX is running, you can use curl to send a request to see if APISIX is working properly:
-
-```shell
-curl -sI "http://127.0.0.1:9080" | grep Server
-```
-
-If everything is ok, you will get the APISIX version similar to the following:
-
-```text
-Server: APISIX/3.2.0
-```
-
-APISIX is now installed and running.
-
-## Next Steps
-
-The following tutorial is based on the working APISIX, please keep everything running and move on to the next step.
-
-* [Configure Routes](configure-routes.md)
-* [Load Balancing](load-balancing.md)
-* [Rate Limiting](rate-limiting.md)
-* [Key Authentication](key-authentication.md)
diff --git a/enterprise_versioned_docs/version-3.2.2/getting-started/install-api7-ee.md b/enterprise_versioned_docs/version-3.2.2/getting-started/install-api7-ee.md
new file mode 100644
index 00000000..f4064930
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/getting-started/install-api7-ee.md
@@ -0,0 +1,4 @@
+---
+title: Install API7 Enterprise Edition
+slug: /getting-started/install-api7-ee
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/getting-started/key-authentication.md b/enterprise_versioned_docs/version-3.2.2/getting-started/key-authentication.md
deleted file mode 100644
index 3c81cf38..00000000
--- a/enterprise_versioned_docs/version-3.2.2/getting-started/key-authentication.md
+++ /dev/null
@@ -1,148 +0,0 @@
----
-title: Key Authentication
-slug: /getting-started/key-authentication
----
-
-An API gateway's primary role is to connect API consumers and providers. For security reasons, it should authenticate and authorize consumers before access to internal resources.
-
-
-
-APISIX has a flexible plugin extension system and a number of existing plugins for user authentication and authorization. For example:
-
-- [Key Authentication](https://apisix.apache.org/docs/apisix/plugins/key-auth/)
-- [Basic Authentication](https://apisix.apache.org/docs/apisix/plugins/basic-auth/)
-- [JSON Web Token (JWT) Authentication](https://apisix.apache.org/docs/apisix/plugins/jwt-auth/)
-- [Keycloak](https://apisix.apache.org/docs/apisix/plugins/authz-keycloak/)
-- [Casdoor](https://apisix.apache.org/docs/apisix/plugins/authz-casdoor/)
-- [Wolf RBAC](https://apisix.apache.org/docs/apisix/plugins/wolf-rbac/)
-- [OpenID Connect](https://apisix.apache.org/docs/apisix/plugins/openid-connect/)
-- [Central Authentication Service (CAS)](https://apisix.apache.org/docs/apisix/plugins/cas-auth/)
-- [HMAC](https://apisix.apache.org/docs/apisix/plugins/hmac-auth/)
-- [Casbin](https://apisix.apache.org/docs/apisix/plugins/authz-casbin/)
-- [LDAP](https://apisix.apache.org/docs/apisix/plugins/ldap-auth/)
-- [Open Policy Agent (OPA)](https://apisix.apache.org/docs/apisix/plugins/opa/)
-- [Forward Authentication](https://apisix.apache.org/docs/apisix/plugins/forward-auth/)
-
-In this tutorial, you will create a consumer with key authentication, and learn how to enable and disable key authentication.
-
-## What Is a Consumer
-
-A _consumer_ is an application or a developer who consumes the API.
-
-In APISIX, a consumer requires a unique `username` to be created. As part of the key authentication configuration, you would also add one of the authentication plugins from the list above to the consumer's `plugin` field.
-
-## What Is Key Authentication
-
-Key authentication is a relatively simple but widely used authentication approach. The idea is as follows:
-
-1. Administrator adds an authentication key (API key) to the Route.
-2. API consumers add the key to the query string or headers for authentication when sending requests.
-
-## Enable Key Authentication
-
-### Prerequisite(s)
-
-1. Complete [Get APISIX](./) to install APISIX.
-2. Complete [Configure Routes](./configure-routes#whats-a-route).
-
-### Create a Consumer
-
-Create a consumer named `tom` and enable the `key-auth` plugin with an API key `secret-key`. All requests sent with the key `secret-key` should be authenticated as `tom`.
-
-:::caution
-
-Please use a complex key in the Production environment.
-
-:::
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT -d '
-{
- "username": "tom",
- "plugins": {
- "key-auth": {
- "key": "secret-key"
- }
- }
-}'
-```
-
-You will receive an `HTTP/1.1 201 OK` response if the consumer was created successfully.
-
-### Enable Authentication
-
-Re-using the same route `getting-started-ip` from [Configure Routes](./configure-routes), you can use the `PATCH` method to add the `key-auth` plugin to the route:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d '
-{
- "plugins": {
- "key-auth": {}
- }
-}'
-```
-
-You will receive an `HTTP/1.1 200 OK` response if the plugin was added successfully.
-
-### Validate
-
-Validate if authentication is successfully enabled in the following steps.
-
-#### 1. Send a request without any key
-
-Send a request without the `apikey` header.
-
-```shell
-curl -i "http://127.0.0.1:9080/ip"
-```
-
-Since the key is not provided, you will receive an unauthorized `HTTP/1.1 401 Unauthorized` response.
-
-#### 2. Send a request with a wrong key
-
-Send a request with a wrong key in the `apikey` header.
-
-```shell
-curl -i "http://127.0.0.1:9080/ip" -H 'apikey: wrong-key'
-```
-
-Since the key is incorrect, you will receive an `HTTP/1.1 401 Unauthorized` response.
-
-#### 3. Send a request with the correct key
-
-Send a request with the correct key in the `apikey` header.
-
-```shell
-curl -i "http://127.0.0.1:9080/ip" -H 'apikey: secret-key'
-```
-
-Since the correct key is provided, you will receive an `HTTP/1.1 200 OK` response.
-
-### Disable Authentication
-
-Disable the key authentication plugin by setting the `_meta.disable` parameter to `true`.
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d '
-{
- "plugins": {
- "key-auth": {
- "_meta": {
- "disable": true
- }
- }
- }
-}'
-```
-
-Send a request without any key to validate:
-
-```shell
-curl -i "http://127.0.0.1:9080/ip"
-```
-
-Since key authentication is disabled, you will receive an `HTTP/1.1 200 OK` response.
-
-## What's Next
-
-You have learned how to configure key authentication for a route. In the next tutorial, you will learn how to configure rate limiting.
diff --git a/enterprise_versioned_docs/version-3.2.2/getting-started/load-balancing.md b/enterprise_versioned_docs/version-3.2.2/getting-started/load-balancing.md
deleted file mode 100644
index d79e93c1..00000000
--- a/enterprise_versioned_docs/version-3.2.2/getting-started/load-balancing.md
+++ /dev/null
@@ -1,75 +0,0 @@
----
-title: Load Balancing
-slug: /getting-started/load-balancing
----
-
-Load balancing is a technique used to distribute network request loads. It is a key consideration in designing systems that need to handle a large volume of traffic, allowing for improved system performance, scalability, and reliability
-
-Apache APISIX supports a variety of [load balancing algorithms](../background-information/key-concepts/upstreams.md#load-balancing), one of which is the weighted round-robin algorithm. This algorithm distributes incoming requests over a set of servers in a cyclical pattern.
-
-In this tutorial, you will create a route with two upstream services and uses the round-robin load balancing algorithm to load balance requests.
-
-## Prerequisite(s)
-
-1. Complete [Get APISIX](./) to install APISIX.
-2. Understand APISIX [Route and Upstream](./configure-routes#whats-a-route).
-
-## Enable Load Balancing
-
-Create a route with two upstream services, [httpbin.org](https://httpbin.org/headers) and [mock.api7.ai](https://mock.api7.ai/headers), to distribute requests across. Both services respond with the request headers when receiving request at `/headers`:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
-{
- "id": "getting-started-headers",
- "uri": "/headers",
- "upstream" : {
-# highlight-start
- // Annotate 1
- "type": "roundrobin",
- // Annotate 2
- "nodes": {
-# highlight-end
- "httpbin.org:443": 1,
- "mock.api7.ai:443": 1
- },
-# highlight-start
- // Annotate 3
- "pass_host": "node",
- // Annotate 4
- "scheme": "https"
-# highlight-end
- }
-}'
-```
-
-❶ `type`: use `roundrobin` as the load balancing algorithm.
-
-❷ `nodes`: upstream services.
-
-❸ `pass_host`: use `node` to pass the host header to the upstream.
-
-❹ `scheme`: use `https` to enable TLS with upstream.
-
-You should receive an `HTTP/1.1 201 OK` response if the route was created successfully.
-
-## Validate
-
-Generate 50 consecutive requests to APISIX `/headers` route to see the load-balancing effect:
-
-```shell
-resp=$(seq 50 | xargs -I{} curl "http://127.0.0.1:9080/headers" -sL) && \
- count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \
- count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \
- echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7
-```
-
-The command keeps count of the number of requests that was handled by the two services respectively. The output shows that requests were distributed over to the two services:
-
-```text
-httpbin.org: 23, mock.api7.ai: 27
-```
-
-## What's Next
-
-You have learned how to configure load balancing. In the next tutorial, you will learn how to configure key authentication.
diff --git a/enterprise_versioned_docs/version-3.2.2/getting-started/rate-limiting.md b/enterprise_versioned_docs/version-3.2.2/getting-started/rate-limiting.md
deleted file mode 100644
index e8988c9d..00000000
--- a/enterprise_versioned_docs/version-3.2.2/getting-started/rate-limiting.md
+++ /dev/null
@@ -1,104 +0,0 @@
----
-title: Rate Limiting
-slug: /getting-started/rate-limiting
----
-
-APISIX is a unified control point, managing the ingress and egress of APIs and microservices traffic. In addition to the legitimate client requests, these requests may also include unwanted traffic generated by web crawlers as well as cyber attacks, such as DDoS.
-
-APISIX offers rate limiting capabilities to protect APIs and microservices by limiting the number of requests sent to upstream services in a given period of time. The count of requests is done efficiently in memory with low latency and high performance.
-
-
-
-

-
-
-
-In this tutorial, you will enable the `limit-count` plugin to set a rate limiting constraint on the incoming traffic.
-
-## Prerequisite(s)
-
-1. Complete the [Get APISIX](./) step to install APISIX first.
-2. Complete the [Configure Routes](./configure-routes#whats-a-route) step.
-
-## Enable Rate Limiting
-
-The following route `getting-started-ip` is inherited from [Configure Routes](./configure-routes). You only need to use the `PATCH` method to add the `limit-count` plugin to the route:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d '
-{
- "plugins": {
- "limit-count": {
- "count": 2,
- "time_window": 10,
- "rejected_code": 429
- }
- }
-}'
-```
-
-You will receive an `HTTP/1.1 200 OK` response if the plugin was added successfully. The above configuration limits the incoming requests to a maximum of 2 requests within 10 seconds.
-
-### Validate
-
-Generate 50 simultaneous requests to see the rate limiting plugin in effect.
-
-```shell
-resp=$(seq 50 | xargs -I{} curl "http://127.0.0.1:9080/ip" -o /dev/null -s -w "%{http_code}\n") && \
- count_200=$(echo "$resp" | grep "200" | wc -l) && \
- count_429=$(echo "$resp" | grep "429" | wc -l) && \
- echo "200": $count_200, "429": $count_429
-```
-
-The results are as expected: out of the 50 requests, 2 requests were sent successfully (status code `200`) while the others were rejected (status code `429`).
-
-```text
-"200": 2, "429": 48
-```
-
-## Disable Rate Limiting
-
-Disable rate limiting by setting the `_meta.disable` parameter to `true`:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d '
-{
- "plugins": {
- "limit-count": {
- "_meta": {
- "disable": true
- }
- }
- }
-}'
-```
-
-### Validate
-
-Generate 50 requests again to validate if it is disabled:
-
-```shell
-resp=$(seq 50 | xargs -I{} curl "http://127.0.0.1:9080/ip" -o /dev/null -s -w "%{http_code}\n") && \
- count_200=$(echo "$resp" | grep "200" | wc -l) && \
- count_429=$(echo "$resp" | grep "429" | wc -l) && \
- echo "200": $count_200, "429": $count_429
-```
-
-The results below show that all of the requests were sent successfully:
-
-```text
-"200": 50, "429": 0
-```
-
-## More
-
-[//]:
-[//]:
-
-You can use [APISIX variables](../reference/built-in-variables.md#apisix-variables) to configure fined matching rules of rate limiting, such as `$host` and `$uri`. In addition, APISIX also supports rate limiting at the cluster level using Redis.
-
-## What's Next
-
-Congratulations! You have learned how to configure rate limiting and completed the Getting Started tutorials.
-
-You can continue to explore other documentations to customize APISIX and meet your production needs.
diff --git a/enterprise_versioned_docs/version-3.2.2/getting-started/set-up-and-launch-init-api.md b/enterprise_versioned_docs/version-3.2.2/getting-started/set-up-and-launch-init-api.md
new file mode 100644
index 00000000..79c9dafc
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/getting-started/set-up-and-launch-init-api.md
@@ -0,0 +1,4 @@
+---
+title: Set Up and Launch Your Initial API
+slug: /getting-started/set-up-and-launch-init-api
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/how-to-guide/authentication/set-up-sso-with-oidc-and-keycloak.md b/enterprise_versioned_docs/version-3.2.2/how-to-guide/authentication/set-up-sso-with-oidc-and-keycloak.md
deleted file mode 100644
index ada5e0a8..00000000
--- a/enterprise_versioned_docs/version-3.2.2/how-to-guide/authentication/set-up-sso-with-oidc-and-keycloak.md
+++ /dev/null
@@ -1,340 +0,0 @@
----
-title: Set Up SSO with OIDC and Keycloak
-slug: /how-to-guide/authentication/set-up-sso-with-oidc-and-keycloak
----
-
-[OpenID Connect (OIDC)](https://openid.net/connect/) is a simple identity layer on top of the [OAuth 2.0 protocol](https://www.rfc-editor.org/rfc/rfc6749). It allows clients to verify the identity of end users based on the authentication performed by the identity provider, as well as to obtain basic profile information about end users in an interoperable and REST-like manner.
-
-[Keycloak](https://www.keycloak.org/) is an open source identity and access management solution for modern applications and services. Keycloak supports single sign-on (SSO), which enables services to interface with Keycloak through protocols such as OIDC and OAuth 2.0. In addition, Keycloak also supports delegating authentication to third party identity providers such as Facebook and Google.
-
-APISIX supports SSO with OIDC to protect APIs by integrating Keycloak as an identity provider.
-
-This guide will show you how to use the plugin `openid-connect` to integrate APISIX with Keycloak. There are [two types of use cases](https://www.keycloak.org/docs/latest/securing_apps/#openid-connect-2) when using OIDC and Keycloak:
-
-* Applications ask the Keycloak server to authenticate users. It is described in [Authentication With User Credentials](#authentication-with-user-credentials).
-* Clients want to gain access to remote services. It is described in [Authentication With Access Token](#authentication-with-access-token).
-
-
-
-## Prerequisite(s)
-
-* Install [Docker](https://docs.docker.com/get-docker/).
-* Install [cURL](https://curl.se/) to send requests to the services for validation.
-* Follow the [Getting Started tutorial](../../getting-started/) to start a new APISIX instance in Docker.
-
-## Configure Keycloak
-
-Start a Keycloak instance named `apisix-quickstart-keycloak` with the administrator name `quickstart-admin` and password `quickstart-admin-pass` in [development mode](https://www.keycloak.org/server/configuration#_starting_keycloak_in_development_mode) in Docker. The exposed port is mapped to 8080 on the host machine:
-
-```shell
-docker run -d --name "apisix-quickstart-keycloak" \
-# highlight-start
- -e 'KEYCLOAK_ADMIN=quickstart-admin' \
- -e 'KEYCLOAK_ADMIN_PASSWORD=quickstart-admin-pass' \
- -p 8080:8080 \
-# highlight-end
- quay.io/keycloak/keycloak:18.0.2 start-dev
-```
-
-Keycloak provides an easy-to-use web UI to help the administrator manage all resources, such as clients, roles, and users.
-
-Navigate to `http://localhost:8080` in browser to access the Keycloak web page, then click __Administration Console__:
-
-
-
-Enter the administrator’s username `quickstart-admin` and password `quickstart-admin-pass` and sign in:
-
-
-
-You need to maintain the login status to configure Keycloak during the following steps.
-
-### Create a Realm
-
-_Realms_ in Keycloak are workspaces to manage resources such as users, credentials, and roles. The resources in different realms are isolated from each other. You need to create a realm named `quickstart-realm` for APISIX.
-
-From the left menu, select __Add realm__:
-
-
-
-Enter the realm name `quickstart-realm` and click __Create__ to create it:
-
-
-
-### Create a Client
-
-_Clients_ in Keycloak are entities that request Keycloak to authenticate a user. More often, clients are applications that want to use Keycloak to secure themselves and provide a single sign-on solution. APISIX is equivalent to a client that is responsible for initiating authentication requests to Keycloak, so you need to create its corresponding client named `apisix-quickstart-client`.
-
-Click __Clients__ > __Create__ to open the __Add Client__ page:
-
-
-
-Enter __Client ID__ as `apisix-quickstart-client`, then select __Client Protocol__ as `openid-connect` and __Save__:
-
-
-
-The client `apisix-quickstart-client` is created. After redirecting to the detailed page, select `confidential` as the __Access Type__:
-
-
-
-When the user login is successful during the SSO, Keycloak will carry the state and code to redirect the client to the addresses in __Valid Redirect URIs__. To simplify the operation, enter wildcard `*` to consider any URI valid:
-
-
-
-Select __Save__ to apply custom configurations.
-
-### Create a User
-
-Users in Keycloak are entities that are able to log into the system. They can have attributes associated with themselves, such as username, email, and address. You need to create a user for login authentication.
-
-Click __Users__ > __Add user__ to open the __Add user__ page:
-
-
-
-Enter the __Username__ as `quickstart-user` and select __Save__:
-
-
-
-Click on __Credentials__, then set the __Password__ as `quickstart-user-pass`. Switch __Temporary__ to `OFF` to turn off the restriction, so that you need not to change password the first time you log in:
-
-
-
-## Obtain the OIDC Configuration
-
-In this section, you will obtain the key OIDC configuration from Keycloak and define them as shell variables. Steps after this section will use these variables to configure the OIDC by shell commands.
-
-:::info
-
-Open a separate terminal to follow the steps and define related shell variables. Then steps after this section could use the defined variables directly.
-
-:::
-
-### Get Discovery Endpoint
-
-Click __Realm Settings__, then right click __OpenID Endpoints Configuration__ and copy the link.
-
-
-
-The link should be the same as the following:
-
-```text
-http://localhost:8080/realms/quickstart-realm/.well-known/openid-configuration
-```
-
-Both APISIX and your client (browser and terminal) should access the discovery URI during the OIDC authentication process. You need to replace `localhost` with the actual host IP, thus the APISIX instance in Docker can access the discovery URI successfully.
-
-Define a variable named `KEYCLOAK_IP` to store the real machine IP, then define a variable named `OIDC_DISCOVERY` to store the URI of discovery:
-
-```shell
-KEYCLOAK_IP=192.168.42.145 # Replace this value with your ip
-OIDC_DISCOVERY=http://${KEYCLOAK_IP}:8080/realms/quickstart-realm/.well-known/openid-configuration
-```
-
-### Get Client ID and Secret
-
-Click __Clients__ > `apisix-quickstart-client` > __Credentials__, then copy the client secret from __Secret__:
-
-
-
-
-
-Define the variables `OIDC_CLIENT_ID` and `OIDC_CLIENT_SECRET` to store client id and secret, respectively. You need to replace the secret with yours:
-
-```shell
-OIDC_CLIENT_ID=apisix-quickstart-client
-OIDC_CLIENT_SECRET=bSaIN3MV1YynmtXvU8lKkfeY0iwpr9cH # Replace this value with yours
-```
-
-## Authentication With User Credentials
-
-In this section, you will create a route with OIDC that forwards client requests to [httpbin.org](http://httpbin.org/), a public HTTP request and response service.
-
-The route `/anything/{anything}` of `httpbin.org` returns anything passed in request data in JSON type, such as methods, arguments, and headers.
-
-### Enable OIDC Plugin
-
-Create a route with id `auth-with-oidc` and enable the plugin `openid-connect`, which forwards all requests sent to `/anything/*` to the upstream `httpbin.org`:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
-{
- "id": "auth-with-oidc",
- "uri":"/anything/*",
- "plugins": {
- "openid-connect": {
-# highlight-start
- // Annotate 1
- "client_id": "'"$OIDC_CLIENT_ID"'",
- // Annotate 2
- "client_secret": "'"$OIDC_CLIENT_SECRET"'",
- // Annotate 3
- "discovery": "'"$OIDC_DISCOVERY"'",
-# highlight-end
- "scope": "openid profile",
-# highlight-start
- // Annotate 4
- "redirect_uri": "http://localhost:9080/anything/callback"
-# highlight-end
- }
- },
- "upstream":{
- "type":"roundrobin",
- "nodes":{
- "httpbin.org:80":1
- }
- }
-}'
-```
-
-❶ `client_id`: OAuth client ID.
-
-❷ `client_secret`: OAuth client secret.
-
-❸ `discovery`: Discovery endpoint URL of the identity server.
-
-❹ `redirect_uri`: URI to which the identity provider redirects back to.
-
-Identity provider redirects requests to a pre-configured URI in `redirect_uri` for token exchange after users sign in. Here you can use a valid URI `http://localhost:9080/anything/callback` defined in this route. More details can be found in [Keycloak document of securing apps](https://www.keycloak.org/docs/latest/securing_apps/#redirect-uris).
-
-### Test With Correct Credentials
-
-Navigate to `http://127.0.0.1:9080/anything/test` in browser. The request will be redirected to a login page:
-
-
-
-Sign in with correct username `quickstart-user` and password `quickstart-user-pass`. If the authentication is successful, the route will redirect the request to upstream `httpbin.org`. A valid response similar to the following verifies that OIDC plugin works:
-
-```json
-{
- "args": {},
- "data": "",
- "files": {},
- "form": {},
- "headers": {
- "Accept": "text/html..."
- ...
- },
- "json": null,
- "method": "GET",
- "origin": "127.0.0.1, 59.71.244.81",
- "url": "http://127.0.0.1/anything/test"
-}
-```
-
-### Test With Wrong Credentials
-
-Sign in with wrong password:
-
-
-
-Authentication failed verifies that the OIDC plugin works and rejects requests with wrong credentials.
-
-## Authentication With Access Token
-
-In this section, you will create a route with OIDC similar to the previous section [Authentication With User Credentials](#authentication-with-user-credentials), but authenticate with access token in headers instead of asking the Keycloak server.
-
-### Enable OIDC Plugin
-
-Create a route with id `auth-with-oidc` and enable the plugin `openid-connect`, but add the parameter `bearer_only` with a value of `true`:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
-{
- "id": "auth-with-oidc",
- "uri":"/anything/*",
- "plugins": {
- "openid-connect": {
-# highlight-start
- // Annotate 1
- "client_id": "'"$OIDC_CLIENT_ID"'",
- // Annotate 2
- "client_secret": "'"$OIDC_CLIENT_SECRET"'",
- // Annotate 3
- "discovery": "'"$OIDC_DISCOVERY"'",
-# highlight-end
- "scope": "openid profile",
-# highlight-start
- // Annotate 4
- "bearer_only": true,
- // Annotate 5
- "redirect_uri": "http://127.0.0.1:9080/anything/callback"
-# highlight-end
- }
- },
- "upstream":{
- "type":"roundrobin",
- "nodes":{
- "httpbin.org:80":1
- }
- }
-}'
-```
-
-❶ `client_id`: OAuth client ID.
-
-❷ `client_secret`: OAuth client secret.
-
-❸ `discovery`: Discovery endpoint URL of the identity server.
-
-❹ `bearer_only`: If its value is true, APISIX only checks if the authorization header in the request matches a bearer token.
-
-❺ `redirect_uri`: URI to which the identity provider redirects back to.
-
-### Test With Valid Access Token
-
-Call the Keycloak [Token endpoint](https://www.keycloak.org/docs/latest/securing_apps/#token-endpoint) to obtain the access token with parameters client ID, client secret, username, and password:
-
-```shell
-OIDC_USER=quickstart-user
-OIDC_PASSWORD=quickstart-user-pass
-curl -i "http://$KEYCLOAK_IP:8080/realms/quickstart-realm/protocol/openid-connect/token" -X POST \
- -d 'grant_type=password' \
- -d 'client_id='$OIDC_CLIENT_ID'&client_secret='$OIDC_CLIENT_SECRET'' \
- -d 'username='$OIDC_USER'&password='$OIDC_PASSWORD''
-```
-
-The expected response is similar to the following:
-
-```text
-{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJ6U3FFaXN6VlpuYi1sRWMzZkp0UHNpU1ZZcGs4RGN3dXI1Mkx5V05aQTR3In0.eyJleHAiOjE2ODAxNjA5NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiMzQ5MTc4YjQtYmExZC00ZWZjLWFlYTUtZGY2MzJiMDJhNWY5IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJhY2NvdW50Iiwic3ViIjoiMTg4MTVjM2EtNmQwNy00YTY2LWJjZjItYWQ5NjdmMmIwMTFmIiwidHlwIjoiQmVhcmVyIiwiYXpwIjoiYXBpc2l4LXF1aWNrc3RhcnQtY2xpZW50Iiwic2Vzc2lvbl9zdGF0ZSI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoicHJvZmlsZSBlbWFpbCIsInNpZCI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwicHJlZmVycmVkX3VzZXJuYW1lIjoicXVpY2tzdGFydC11c2VyIn0.uD_7zfZv5182aLXu9-YBzBDK0nr2mE4FWb_4saTog2JTqFTPZZa99Gm8AIDJx2ZUcZ_ElkATqNUZ4OpWmL2Se5NecMw3slJReewjD6xgpZ3-WvQuTGpoHdW5wN9-Rjy8ungilrnAsnDA3tzctsxm2w6i9KISxvZrzn5Rbk-GN6fxH01VC5eekkPUQJcJgwuJiEiu70SjGnm21xDN4VGkNRC6jrURoclv3j6AeOqDDIV95kA_MTfBswDFMCr2PQlj5U0RTndZqgSoxwFklpjGV09Azp_jnU7L32_Sq-8coZd0nj5mSdbkJLJ8ZDQDV_PP3HjCP7EHdy4P6TyZ7oGvjw","expires_in":300,"refresh_expires_in":1800,"refresh_token":"eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICI0YjFiNTQ3Yi0zZmZjLTQ5YzQtYjE2Ni03YjdhNzIxMjk1ODcifQ.eyJleHAiOjE2ODAxNjI0NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiYzRjNjNlMTEtZTdlZS00ZmEzLWJlNGYtNDMyZWQ4ZmY5OTQwIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJodHRwOi8vMTkyLjE2OC40Mi4xNDU6ODA4MC9yZWFsbXMvcXVpY2tzdGFydC1yZWFsbSIsInN1YiI6IjE4ODE1YzNhLTZkMDctNGE2Ni1iY2YyLWFkOTY3ZjJiMDExZiIsInR5cCI6IlJlZnJlc2giLCJhenAiOiJhcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJzZXNzaW9uX3N0YXRlIjoiYjE2YjI2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2Iiwic2NvcGUiOiJwcm9maWxlIGVtYWlsIiwic2lkIjoiYjE2YjI
2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2In0.8xYP4bhDg1U9B5cTaEVD7B4oxNp8wwAYEynUne_Jm78","token_type":"Bearer","not-before-policy":0,"session_state":"b16b262e-1056-4515-a455-f25e077ccb76","scope":"profile email"}
-```
-
-Define the variable `OIDC_ACCESS_TOKEN` to store the token:
-
-```shell
-# Replace the token with yours
-OIDC_ACCESS_TOKEN="eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJ6U3FFaXN6VlpuYi1sRWMzZkp0UHNpU1ZZcGs4RGN3dXI1Mkx5V05aQTR3In0.eyJleHAiOjE2ODAxNjA5NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiMzQ5MTc4YjQtYmExZC00ZWZjLWFlYTUtZGY2MzJiMDJhNWY5IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJhY2NvdW50Iiwic3ViIjoiMTg4MTVjM2EtNmQwNy00YTY2LWJjZjItYWQ5NjdmMmIwMTFmIiwidHlwIjoiQmVhcmVyIiwiYXpwIjoiYXBpc2l4LXF1aWNrc3RhcnQtY2xpZW50Iiwic2Vzc2lvbl9zdGF0ZSI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoicHJvZmlsZSBlbWFpbCIsInNpZCI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwicHJlZmVycmVkX3VzZXJuYW1lIjoicXVpY2tzdGFydC11c2VyIn0.uD_7zfZv5182aLXu9-YBzBDK0nr2mE4FWb_4saTog2JTqFTPZZa99Gm8AIDJx2ZUcZ_ElkATqNUZ4OpWmL2Se5NecMw3slJReewjD6xgpZ3-WvQuTGpoHdW5wN9-Rjy8ungilrnAsnDA3tzctsxm2w6i9KISxvZrzn5Rbk-GN6fxH01VC5eekkPUQJcJgwuJiEiu70SjGnm21xDN4VGkNRC6jrURoclv3j6AeOqDDIV95kA_MTfBswDFMCr2PQlj5U0RTndZqgSoxwFklpjGV09Azp_jnU7L32_Sq-8coZd0nj5mSdbkJLJ8ZDQDV_PP3HjCP7EHdy4P6TyZ7oGvjw"
-```
-
-Send a request to the route `/anything/test` with authorization header:
-
-```shell
-curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer $OIDC_ACCESS_TOKEN"
-```
-
-An `HTTP/1.1 200 OK` response verifies that the OIDC plugin works and accepts requests with valid access token.
-
-### Test With Invalid Access Token
-
-Send a request to `http://127.0.0.1:9080/anything/test` with invalid access token:
-
-```shell
-curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer invalid-access-token"
-```
-
-An `HTTP/1.1 401 Unauthorized` response verifies that the OIDC plugin works and rejects requests with invalid access token.
-
-### Test Without Access Token
-
-Send a request to `http://127.0.0.1:9080/anything/test` without access token:
-
-```shell
-curl -i "http://127.0.0.1:9080/anything/test"
-```
-
-An `HTTP/1.1 401 Unauthorized` response verifies that the OIDC plugin works and rejects requests without access token.
-
-## Next Steps
-
-APISIX supports more OIDC identity providers, such as Okta, Auth0, and Azure AD (coming soon).
-
-In addition, APISIX also supports other authentication approaches, such as basic authentication, JWT, and key authentication (coming soon).
diff --git a/enterprise_versioned_docs/version-3.2.2/how-to-guide/observability/log-with-clickhouse.md b/enterprise_versioned_docs/version-3.2.2/how-to-guide/observability/log-with-clickhouse.md
deleted file mode 100644
index abc97e4e..00000000
--- a/enterprise_versioned_docs/version-3.2.2/how-to-guide/observability/log-with-clickhouse.md
+++ /dev/null
@@ -1,240 +0,0 @@
----
-title: Log with ClickHouse
-slug: /how-to-guide/observability/log-with-clickhouse
----
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-APISIX supports collecting route access information and recording it as logs, such as host, client IP, and request timestamp. This key information will be of great help in troubleshooting related problems.
-
-[ClickHouse](https://clickhouse.com/) is an open-source column-oriented database management system (DBMS) for online analytical processing (OLAP). It allows users to generate analytical reports such as log analytics using SQL queries in real-time.
-
-This guide will show you how to enable the `clickhouse-logger` plugin to record the APISIX logs into ClickHouse databases.
-
-## Prerequisite(s)
-
-* Install [Docker](https://docs.docker.com/get-docker/).
-* Install [cURL](https://curl.se/) to send requests to the services for validation.
-* Follow the [Getting Started tutorial](./../../getting-started/) to start a new APISIX instance in Docker.
-
-## Configure ClickHouse
-
-Start a ClickHouse instance named `quickstart-clickhouse-server` with a default database `quickstart_db`, a default user `quickstart-user` and password `quickstart-pass` in Docker:
-
-```shell
-docker run -d \
- --name quickstart-clickhouse-server \
- --network=apisix-quickstart-net \
-# highlight-start
- -e CLICKHOUSE_DB=quickstart_db \
- -e CLICKHOUSE_USER=quickstart-user \
- -e CLICKHOUSE_PASSWORD=quickstart-pass \
-# highlight-end
- -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 \
- --ulimit nofile=262144:262144 \
- clickhouse/clickhouse-server
-```
-
-Connect to the ClickHouse instance using the command line tool `clickhouse-client` in Docker:
-
-```shell
-docker exec -it quickstart-clickhouse-server clickhouse-client
-```
-
-Create a table `test` in database `quickstart_db` with fields `host`, `client_ip`, `route_id`, `@timestamp` of `String` type, or adjust the command accordingly based on your needs:
-
-```sql
-CREATE TABLE quickstart_db.test (
- `host` String,
- `client_ip` String,
- `route_id` String,
- `@timestamp` String,
- PRIMARY KEY(`@timestamp`)
-) ENGINE = MergeTree()
-```
-
-If successful, you should see `Ok` on the output.
-
-Enter `exit` to exit from the command line interface in Docker.
-
-## Enable `clickhouse-logger` Plugin
-
-Create a route:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
-{
- "id": "getting-started-ip",
- "uri": "/ip",
- "upstream": {
- "type": "roundrobin",
- "nodes": {
- "httpbin.org:80": 1
- }
- }
-}'
-```
-
-Enable the `clickhouse-logger` plugin globally for all requests, or on a specific route:
-
-
-
-
-Enable the `clickhouse-logger` plugin on all routes:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/global_rules" -X PUT -d '
-{
- "id": "clickhouse",
- "plugins": {
- "clickhouse-logger": {
-# highlight-start
-// Annotate 1
- "log_format": {
-# highlight-end
- "host": "$host",
- "@timestamp": "$time_iso8601",
- "client_ip": "$remote_addr"
- },
-# highlight-start
-// Annotate 2
- "user": "quickstart-user",
- "password": "quickstart-pass",
- "database": "quickstart_db",
- "logtable": "test",
- "endpoint_addrs": ["http://quickstart-clickhouse-server:8123"]
-# highlight-end
- }
- }
-}'
-```
-
-➊ Specify all fields corresponding to the ClickHouse table in the log format
-
-➋ ClickHouse server information
-
-An `HTTP/1.1 201 Created` response verifies that the `clickhouse-logger` plugin is enabled successfully.
-
-
-
-
-Enable the `clickhouse-logger` plugin on a specific route:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d '
-{
- "plugins": {
- "clickhouse-logger": {
-# highlight-start
-// Annotate 1
- "log_format": {
-# highlight-end
- "host": "$host",
- "@timestamp": "$time_iso8601",
- "client_ip": "$remote_addr"
- },
-# highlight-start
-// Annotate 2
- "user": "quickstart-user",
- "password": "quickstart-pass",
- "database": "quickstart_db",
- "logtable": "test",
- "endpoint_addrs": ["http://quickstart-clickhouse-server:8123"]
-# highlight-end
- }
- }
-}'
-```
-
-➊ Specify all fields corresponding to the ClickHouse table in the log format
-
-➋ ClickHouse server information
-
-An `HTTP/1.1 200 OK` response verifies that the `clickhouse-logger` plugin is enabled successfully.
-
-
-
-
-## Submit Logs in Batches
-
-The `clickhouse-logger` plugin supports using a batch processor to aggregate and process logs in batches. This avoids frequent submissions of log entries to ClickHouse, which slows down the operations.
-
-By default, the batch processor submits data every 5 seconds or when the data size in a batch reaches 1000 KB. You can adjust the time interval of submission `inactive_timeout` and maximum batch size `batch_max_size` for the plugin. For example, this is how you can set `inactive_timeout` to 10 seconds and `batch_max_size` to 2000 KB:
-
-
-
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/global_rules/clickhouse" -X PATCH -d '
-{
- "plugins": {
- "clickhouse-logger": {
- "batch_max_size": 2000,
- "inactive_timeout": 10
- }
- }
-}'
-```
-
-
-
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d '
-{
- "plugins": {
- "clickhouse-logger": {
- "batch_max_size": 2000,
- "inactive_timeout": 10
- }
- }
-}'
-```
-
-
-
-
-## Verify Logging
-
-Send a request to the route to generate an access log entry:
-
-```shell
-curl -i "http://127.0.0.1:9080/ip"
-```
-
-Connect to the ClickHouse instance using the command line tool `clickhouse-client` in Docker:
-
-```shell
-docker exec -it quickstart-clickhouse-server clickhouse-client
-```
-
-Query all records in table `quickstart_db.test`:
-
-```sql
-SELECT * from quickstart_db.test
-```
-
-You should see an access record similar to the following, which verifies `clickhouse-logger` plugin works as intended.
-
-```text
-┌─host──────┬─client_ip──┬─route_id───────────┬─@timestamp────────────────┐
-│ 127.0.0.1 │ 172.18.0.1 │ getting-started-ip │ 2023-06-07T15:28:24+00:00 │
-└───────────┴────────────┴────────────────────┴───────────────────────────┘
-```
-
-## Next Steps
-
-See `clickhouse-logger` plugin reference to learn more about the plugin configuration options (coming soon).
-
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/how-to-guide/observability/monitor-apisix-with-prometheus.md b/enterprise_versioned_docs/version-3.2.2/how-to-guide/observability/monitor-apisix-with-prometheus.md
deleted file mode 100644
index a6c70117..00000000
--- a/enterprise_versioned_docs/version-3.2.2/how-to-guide/observability/monitor-apisix-with-prometheus.md
+++ /dev/null
@@ -1,129 +0,0 @@
----
-title: Monitor APISIX with Prometheus
-slug: /how-to-guide/observability/monitor-apisix-with-prometheus
----
-
-Prometheus is a popular systems monitoring and alerting toolkit. It collects and stores multi-dimensional time series data like metrics with key-value paired labels.
-
-APISIX offers the capability to expose a significant number of metrics to Prometheus [with low latency](https://api7.ai/blog/1s-to-10ms-reducing-prometheus-delay-in-api-gateway), allowing for continuous monitoring and diagnostics.
-
-This guide will show you how to enable the `prometheus` plugin to integrate with Prometheus and Grafana services, where APISIX HTTP metrics are collected and visualized.
-
-
-
-

-
-
-
-## Prerequisite(s)
-
-* Install [Docker](https://docs.docker.com/get-docker/).
-* Install [cURL](https://curl.se/) to send requests to the services for validation.
-* Follow the [Getting Started tutorial](./../../getting-started/) to start a new APISIX instance in Docker.
-
-## Enable Prometheus Plugin
-
-Create a global rule to enable the `prometheus` plugin on all routes by adding `"prometheus": {}` in the plugins option.
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/global_rules" -X PUT -d '{
- "id": "rule-for-metrics",
- "plugins": {
- "prometheus":{}
- }
-}'
-```
-
-APISIX gathers internal runtime metrics and exposes them through port `9091` and path `/apisix/prometheus/metrics` by default. The port and path can be customized in APISIX.
-
-Send a request to the route `/apisix/prometheus/metrics` to fetch metrics from APISIX:
-
-```shell
-docker exec apisix-quickstart curl -sL "http://apisix-quickstart:9091/apisix/prometheus/metrics"
-```
-
-Responded metrics are similar to the following, holding all routes (such as an existing route `/ip` here) metrics:
-
-```text
-# HELP apisix_etcd_modify_indexes Etcd modify index for APISIX keys
-# TYPE apisix_etcd_modify_indexes gauge
-apisix_etcd_modify_indexes{key="consumers"} 0
-apisix_etcd_modify_indexes{key="global_rules"} 0
-apisix_etcd_modify_indexes{key="max_modify_index"} 16
-apisix_etcd_modify_indexes{key="prev_index"} 15
-apisix_etcd_modify_indexes{key="protos"} 0
-apisix_etcd_modify_indexes{key="routes"} 16
-apisix_etcd_modify_indexes{key="services"} 0
-apisix_etcd_modify_indexes{key="ssls"} 0
-apisix_etcd_modify_indexes{key="stream_routes"} 0
-apisix_etcd_modify_indexes{key="upstreams"} 0
-apisix_etcd_modify_indexes{key="x_etcd_index"} 16
-# HELP apisix_etcd_reachable Config server etcd reachable from APISIX, 0 is unreachable
-# TYPE apisix_etcd_reachable gauge
-apisix_etcd_reachable 1
-...
-# HELP apisix_http_status HTTP status codes per service in APISIX
-# TYPE apisix_http_status counter
-apisix_http_status{code="200",route="ip",matched_uri="/ip",matched_host="",service="",consumer="",node="52.20.124.211"} 1
-...
-```
-
-## Configure Prometheus
-
-Targets are monitored objects in Prometheus. You can configure the APISIX metric endpoint as a target in the Prometheus configuration file `prometheus.yml`.
-
-```shell
-echo 'scrape_configs:
- - job_name: "apisix"
- scrape_interval: 15s
- metrics_path: "/apisix/prometheus/metrics"
- static_configs:
- - targets: ["apisix-quickstart:9091"]
-' > prometheus.yml
-```
-
-Start a Prometheus instance in Docker. The exposed port is mapped to `9092` on the host because `9090` is reserved for APISIX. The local configuration file `prometheus.yml` is mounted to the Prometheus container.
-
-```shell
-docker run -d --name apisix-quickstart-prometheus \
- -p 9092:9090 \
- --network=apisix-quickstart-net \
- -v $(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml \
- prom/prometheus:latest
-```
-
-You can now check if the state is "UP" on the Prometheus webpage. Prometheus will collect metrics from APISIX by scraping its metric HTTP endpoint.
-
-
-
-## Configure Grafana
-
-Grafana can visualize metrics stored in Prometheus. Start a Grafana instance on port `3000` in Docker.
-
-```shell
-docker run -d --name=apisix-quickstart-grafana \
- -p 3000:3000 \
- --network=apisix-quickstart-net \
- grafana/grafana-oss
-```
-
-Add the Prometheus instance created above to Grafana as a data source:
-
-
-
-The official APISIX metric dashboard is published to [Grafana dashboards](https://grafana.com/grafana/dashboards/) with ID [11719](https://grafana.com/grafana/dashboards/11719-apache-apisix/). You can then import the dashboard into Grafana with the ID.
-
-
-
-If everything is OK, the dashboard will automatically visualize metrics in real time.
-
-
-
-## Next Steps
-
-You have now learned how to monitor APISIX metrics with Prometheus and visualize them in Grafana.
-
-Explore other resources in How-To Guides to monitor APISIX logs and traces (coming soon).
-
-[//]:
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/how-to-guide/security/secrets-management/manage-secrets-in-hashicorp-vault.md b/enterprise_versioned_docs/version-3.2.2/how-to-guide/security/secrets-management/manage-secrets-in-hashicorp-vault.md
deleted file mode 100644
index 146f4374..00000000
--- a/enterprise_versioned_docs/version-3.2.2/how-to-guide/security/secrets-management/manage-secrets-in-hashicorp-vault.md
+++ /dev/null
@@ -1,265 +0,0 @@
----
-title: Manage APISIX Secrets in HashiCorp Vault
-slug: /how-to-guide/security/secrets-management/manage-secrets-in-hashicorp-vault
----
-
-[HashiCorp Vault](https://www.vaultproject.io/) is a centralized platform for managing secrets and encryption across different environments and applications. It provides a unified secrets management for storing and accessing, such as API keys, passwords, certificates, and more.
-
-APISIX supports storing sensitive data in the configuration file as secrets, such as `admin_key`, etcd `username`, `password`.
-
-This guide will show you how to configure HashiCorp Vault as a secrets manager, then store the APISIX `admin_key` in Vault and reference the key in APISIX configuration file.
-
-
-
-## Prerequisite(s)
-
-* Install [Docker](https://docs.docker.com/get-docker/).
-* Install [cURL](https://curl.se/) to send requests to the services for validation.
-* Install [ZIP](https://infozip.sourceforge.net/Zip.html) to unzip the Vault binary from the [official distributed zipped file](https://developer.hashicorp.com/vault/downloads).
-* Follow the [Getting Started tutorial](../../../getting-started/) to start a new APISIX instance in Docker.
-
-## Configure Vault Server
-
-Start a Vault instance in dev mode in Docker named `apisix-quickstart-vault` with the token `apisix-quickstart-vault-token`. The exposed port is mapped to `8200` on the host machine:
-
-```shell
-docker run -d --cap-add=IPC_LOCK \
- -e 'VAULT_DEV_LISTEN_ADDRESS=0.0.0.0:8200' \
- -e 'VAULT_ADDR=http://127.0.0.1:8200' \
-# highlight-start
- -e 'VAULT_DEV_ROOT_TOKEN_ID=apisix-quickstart-vault-token' \
- -e 'VAULT_TOKEN=apisix-quickstart-vault-token' \
- --network=apisix-quickstart-net \
- --name apisix-quickstart-vault \
-# highlight-end
- -p 8200:8200 vault:1.13.0
-```
-
-APISIX needs permission to access Vault and retrieve secrets. You should create a policy file in [HashiCorp Configuration Language (HCL)](https://github.com/hashicorp/hcl) to generate a Vault access token for APISIX.
-
-Create a Vault policy file named `apisix-policy.hcl` in the Vault instance to grant read permission of the path `secret/` to APISIX. You can put secrets under the path `secret/` to allow APISIX to read them:
-
-```shell
-docker exec apisix-quickstart-vault /bin/sh -c "echo '
-# highlight-start
-path \"secret/data/*\" {
- capabilities = [\"read\"]
-}
-# highlight-end
-' > /etc/apisix-policy.hcl"
-```
-
-Apply the policy file to the Vault instance:
-
-```shell
-docker exec apisix-quickstart-vault vault policy write apisix-policy /etc/apisix-policy.hcl
-```
-
-Next, generate the access token attached to the newly defined policy for APISIX to access Vault:
-
-```shell
-docker exec apisix-quickstart-vault vault token create -policy="apisix-policy"
-```
-
-Every execution of the above command will generate a different token. If successful, the output should be similar to the following:
-
-```text
-Key Value
---- -----
-# highlight-start
-token hvs.CAESIHUznrV4wgcifUia0FROd6iprK7NjipAiHBYwiZDQP9TGh4KHGh2cy5ndHc5dzBPbXd5Y1pzblZXd2ZuQXA3ZHI
-# highlight-end
-token_accessor YY4iCj2lICDNd50ZJDsBjvZK
-token_duration 768h
-token_renewable true
-token_policies ["apisix-policy" "default"]
-identity_policies []
-policies ["apisix-policy" "default"]
-```
-
-Copy the value of token and create a file named `apisix-vault-token` to store it in the APISIX instance:
-
-```shell
-docker exec apisix-quickstart /bin/sh -c "echo '
-# highlight-start
-hvs.CAESIHUznrV4wgcifUia0FROd6iprK7NjipAiHBYwiZDQP9TGh4KHGh2cy5ndHc5dzBPbXd5Y1pzblZXd2ZuQXA3ZHI
-# highlight-end
-' > /usr/local/apisix/conf/apisix-vault-token"
-```
-
-Vault Agent uses the token `apisix-vault-token` to authenticate with Vault in the next step.
-
-## Configure Vault Agent
-
-Vault Agent is a client daemon that runs with your applications and automates authentication with Vault and token renewal. It acts as a proxy for Vault's API and renders Vault secrets as files.
-
-Sensitive configurations in APISIX can be stored in the Vault, and then the Vault Agent reads them and inject them into the APISIX configuration file, such as the `admin_key`.
-
-Download the Vault binary and copy it into the APISIX instance:
-
-```shell
-wget https://releases.hashicorp.com/vault/1.13.0/vault_1.13.0_linux_amd64.zip
-unzip vault_1.13.0_linux_amd64.zip
-docker cp vault apisix-quickstart:/usr/local/bin/
-```
-
-Create a file in APISIX instance named `vault-agent-apisix.hcl` to configure how the Vault Agent accesses the server and renders the secrets.
-
-```shell
-docker exec apisix-quickstart /bin/sh -c "echo '
-pid_file = \"./pidfile\"
-
-vault {
- # highlight-start
- // Annotate 1
- address = \"http://apisix-quickstart-vault:8200\"
- # highlight-end
- retry {
- num_retries = 5
- }
-}
-
-auto_auth {
- method {
- type = \"token_file\"
- config = {
- # highlight-start
- // Annotate 2
- token_file_path = \"/usr/local/apisix/conf/apisix-vault-token\"
- # highlight-end
- }
- }
-}
-
-# highlight-start
-// Annotate 3
-listener \"tcp\" {
- address = \"127.0.0.1:8100\"
- tls_disable = true
-}
-# highlight-end
-
-template {
- # highlight-start
- // Annotate 4
- source = \"/usr/local/apisix/conf/config.ctmpl\"
- // Annotate 5
- destination = \"/usr/local/apisix/conf/config.yaml\"
- # highlight-end
-}
-' > /usr/local/apisix/conf/vault-agent-apisix.hcl"
-```
-
-❶ Vault address
-
-❷ Token for authentication
-
-❸ Vault Agent daemon listening attributes
-
-❹ Template file for rendering
-
-❺ Rendered APISIX configuration file
-
-Vault Agent injects secrets into the APISIX configuration file `config.yaml` according to the template file `config.ctmpl` after it is started. Both two files are stored in the APISIX default configuration path `/usr/local/apisix/conf/`.
-
-## Store a Secret
-
-Create a secret `adminKey=apisix-quickstart-key` and store it in the path `secret/apisix/` of Vault:
-
-```shell
-docker exec apisix-quickstart-vault vault kv put secret/apisix adminKey=apisix-quickstart-key
-```
-
-The expected response is similar to the following:
-
-```text
-=== Secret Path ===
-secret/data/apisix
-
-======= Metadata =======
-Key Value
---- -----
-created_time 2023-03-15T11:42:17.123175125Z
-custom_metadata
-deletion_time n/a
-destroyed false
-version 1
-```
-
-## Use the Secret
-
-Create an APISIX template configuration file named `config.ctmpl` in the APISIX instance. Populate the `admin_key` using [Consul Template syntax](https://github.com/hashicorp/consul-template/blob/v0.28.1/docs/templating-language.md), which will be replaced with `secret/apisix/adminKey` at runtime.
-
-[//]:
-
-```shell
-docker exec apisix-quickstart /bin/sh -c "echo '
-deployment:
- role: traditional
- role_traditional:
- config_provider: etcd
- admin:
- allow_admin:
- - 0.0.0.0/0
- admin_key:
- -
- name: admin
- # highlight-start
- key: {{ with secret \"secret/apisix\"}} {{ .Data.data.adminKey }} {{ end }}
- # highlight-end
- role: admin
-plugin_attr:
- prometheus:
- export_addr:
- ip: 0.0.0.0
- port: 9091
-' > /usr/local/apisix/conf/config.ctmpl"
-```
-
-The following command starts the Vault Agent daemon in the APISIX instance with the configuration file `vault-agent-apisix.hcl` created above. The Agent will retrieves the secret `admin_key` and injects it into the APISIX configuration file `config.yaml`.
-
-```shell
-docker exec -d apisix-quickstart vault agent -config=/usr/local/apisix/conf/vault-agent-apisix.hcl
-```
-
-Reload the APISIX container for configuration changes to take effect:
-
-```shell
-docker exec apisix-quickstart apisix reload
-```
-
-## Validate
-
-To verify that the new `admin_key` is in effect:
-
-1. Request Admin API with the correct `admin_key`:
-
- ```shell
- curl -i "http://localhost:9180/apisix/admin/routes" -H 'X-API-KEY: apisix-quickstart-key'
- ```
-
- The expected response is similar to the following:
-
- ```text
- HTTP/1.1 200 OK
- ...
- ```
-
-2. Request Admin API with a wrong `admin_key`:
-
- ```shell
- curl -i "http://localhost:9180/apisix/admin/routes" -H 'X-API-KEY: wrong-key'
- ```
-
- The expected response is similar to the following:
-
- ```
- HTTP/1.1 401 Unauthorized
- ...
- ```
-
-## Next Steps
-
-APISIX supports Vault as the backend to manage other types of secrets, such as JWT tokens and certificates.
-
-See other guides in this chapter (coming soon) to learn more about integrating Vault with APISIX for authentication and certificates management.
diff --git a/enterprise_versioned_docs/version-3.2.2/how-to-guide/traffic-management/tls-and-mtls/configure-https-between-client-and-apisix.md b/enterprise_versioned_docs/version-3.2.2/how-to-guide/traffic-management/tls-and-mtls/configure-https-between-client-and-apisix.md
deleted file mode 100644
index 69886490..00000000
--- a/enterprise_versioned_docs/version-3.2.2/how-to-guide/traffic-management/tls-and-mtls/configure-https-between-client-and-apisix.md
+++ /dev/null
@@ -1,156 +0,0 @@
----
-title: Configure HTTPS Between Client and APISIX
-slug: /how-to-guide/traffic-management/tls-and-mtls/configure-https-between-client-and-apisix
----
-
-_TLS (Transport Layer Security)_ is a cryptographic protocol designed to secure communication between two entities. Enforcing HTTPS between clients and APISIX improves the security and authenticity during the data transmission.
-
-This guide will show you how to configure HTTPS between client applications and APISIX.
-
-
-
-

-
-
-
-## Prerequisite(s)
-
-* Install [Docker](https://docs.docker.com/get-docker).
-* Install [cURL](https://curl.se/) to send requests to the services for validation.
-* Follow the [Getting Started tutorial](../../../getting-started/) to start a new APISIX instance in Docker.
-
-## Create a Route
-
-Create a route that forwards all requests to `/ip` to the upstream `httpbin.org`:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
-{
- "id": "quickstart-client-ip",
- "uri": "/ip",
- "upstream": {
- "nodes": {
- "httpbin.org:80":1
- },
- "type": "roundrobin"
- }
-}'
-```
-
-An `HTTP/1.1 200 OK` response verifies that the route is created successfully.
-
-## Generate Certificates and Keys
-
-Generate the certificate authority (CA) key and certificate:
-
-```shell
-openssl genrsa -out ca.key 2048 && \
- openssl req -new -sha256 -key ca.key -out ca.csr -subj "/CN=ROOTCA" && \
- openssl x509 -req -days 36500 -sha256 -extensions v3_ca -signkey ca.key -in ca.csr -out ca.crt
-```
-
-Generate the key and certificate with the common name `test.com` for APISIX, and sign with the CA certificate:
-
-```shell
-openssl genrsa -out server.key 2048 && \
- openssl req -new -sha256 -key server.key -out server.csr -subj "/CN=test.com" && \
- openssl x509 -req -days 36500 -sha256 -extensions v3_req \
- -CA ca.crt -CAkey ca.key -CAserial ca.srl -CAcreateserial \
- -in server.csr -out server.crt
-```
-
-## Configure HTTPS for APISIX
-
-Load the content stored in `server.crt` and `server.key` into shell variables:
-
-```shell
-server_cert=$(cat server.crt)
-server_key=$(cat server.key)
-```
-
-Create an SSL certificate object to save the server certificate and its key:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/ssls" -X PUT -d '
-{
- "id": "quickstart-tls-client-ssl",
-# highlight-start
-// Annotate 1
- "sni": "test.com",
-// Annotate 2
- "cert": "'"${server_cert}"'",
-// Annotate 3
- "key": "'"${server_key}"'"
-# highlight-end
-}'
-```
-
-❶ `sni`: `test.com`, the same as server certificate CN value
-
-❷ `cert`: server certificate
-
-❸ `key`: private key for the server certificate
-
-## Verify HTTPS between Client and APISIX
-
-As the certificate is only valid for the CN `test.com`, you should use `test.com` as the domain name where APISIX is hosted.
-
-Send a request to `https://test.com:9443/ip` and resolve `test.com` to `127.0.0.1`:
-
-```shell
-curl -ikv --resolve "test.com:9443:127.0.0.1" "https://test.com:9443/ip"
-```
-
-A TLS handshake process similar to the following verifies the TLS between client and APISIX is enabled:
-
-```text
-* Added test.com:9443:127.0.0.1 to DNS cache
-* Hostname test.com was found in DNS cache
-* Trying 127.0.0.1:9443...
-* Connected to test.com (127.0.0.1) port 9443 (#0)
-* ALPN, offering h2
-* ALPN, offering http/1.1
-* successfully set certificate verify locations:
-* CAfile: /etc/ssl/certs/ca-certificates.crt
-* CApath: /etc/ssl/certs
-# highlight-start
-* TLSv1.3 (OUT), TLS handshake, Client hello (1):
-* TLSv1.3 (IN), TLS handshake, Server hello (2):
-* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):
-* TLSv1.3 (IN), TLS handshake, Certificate (11):
-* TLSv1.3 (IN), TLS handshake, CERT verify (15):
-* TLSv1.3 (IN), TLS handshake, Finished (20):
-* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):
-* TLSv1.3 (OUT), TLS handshake, Finished (20):
-* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
-# highlight-end
-* ALPN, server accepted to use h2
-* Server certificate:
-* subject: CN=test.com
-* start date: Apr 21 07:47:54 2023 GMT
-* expire date: Mar 28 07:47:54 2123 GMT
-* issuer: CN=ROOTCA
-* SSL certificate verify result: unable to get local issuer certificate (20), continuing anyway.
-* Using HTTP2, server supports multi-use
-* Connection state changed (HTTP/2 confirmed)
-* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0
-* Using Stream ID: 1 (easy handle 0x556274d632e0)
-> GET /ip HTTP/2
-> Host: test.com:9443
-> user-agent: curl/7.74.0
-> accept: */*
->
-* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
-* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
-* old SSL session ID is stale, removing
-* Connection state changed (MAX_CONCURRENT_STREAMS == 128)!
-< HTTP/2 200
-HTTP/2 200
-...
-```
-
-## Next Steps
-
-You can learn more about TLS and APISIX SSL object in [SSL Certificates](../../../key-concepts/ssl-certificates).
-
-APISIX also supports mTLS connection between clients and APISIX. See [Configure mTLS between client and APISIX](../tls-and-mtls/configure-mtls-between-client-and-apisix) for more details.
diff --git a/enterprise_versioned_docs/version-3.2.2/how-to-guide/traffic-management/tls-and-mtls/configure-mtls-between-client-and-apisix.md b/enterprise_versioned_docs/version-3.2.2/how-to-guide/traffic-management/tls-and-mtls/configure-mtls-between-client-and-apisix.md
deleted file mode 100644
index ec0d5cee..00000000
--- a/enterprise_versioned_docs/version-3.2.2/how-to-guide/traffic-management/tls-and-mtls/configure-mtls-between-client-and-apisix.md
+++ /dev/null
@@ -1,248 +0,0 @@
----
-title: Configure mTLS Between Client and APISIX
-slug: /how-to-guide/traffic-management/tls-and-mtls/configure-mtls-between-client-and-apisix
----
-
-_Mutual TLS (mTLS)_ is a two-way TLS where client and the server authenticate each other. It is typically implemented to prevent unauthorized access and harden security.
-
-This guide will show you how to configure mTLS between downstream client applications and APISIX.
-
-
-
-

-
-
-
-## Prerequisite(s)
-
-* Install [Docker](https://docs.docker.com/get-docker).
-* Install [cURL](https://curl.se/) to send requests to the services for validation.
-* Follow the [Getting Started tutorial](../../../getting-started/) to start a new APISIX instance in Docker.
-
-## Create a Route
-
-Create a route that forwards all requests to `/ip` to the upstream `httpbin.org`:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
-{
- "id": "quickstart-ip",
- "uri": "/ip",
- "upstream": {
- "nodes": {
- "httpbin.org:80":1
- },
- "type": "roundrobin"
- }
-}'
-```
-
-An `HTTP/1.1 200 OK` response verifies that the route is created successfully.
-
-## Generate Certificates and Keys
-
-Generate the certificate authority (CA) key and certificate:
-
-```shell
-openssl genrsa -out ca.key 2048 && \
- openssl req -new -sha256 -key ca.key -out ca.csr -subj "/CN=ROOTCA" && \
- openssl x509 -req -days 36500 -sha256 -extensions v3_ca -signkey ca.key -in ca.csr -out ca.crt
-```
-
-Generate the key and certificate with the common name `test.com` for APISIX, and sign with the CA certificate:
-
-```shell
-openssl genrsa -out server.key 2048 && \
- openssl req -new -sha256 -key server.key -out server.csr -subj "/CN=test.com" && \
- openssl x509 -req -days 36500 -sha256 -extensions v3_req \
- -CA ca.crt -CAkey ca.key -CAserial ca.srl -CAcreateserial \
- -in server.csr -out server.crt
-```
-
-Generate the key and certificate with the common name `CLIENT` for a client, and sign with the CA certificate:
-
-```shell
-openssl genrsa -out client.key 2048 && \
- openssl req -new -sha256 -key client.key -out client.csr -subj "/CN=CLIENT" && \
- openssl x509 -req -days 36500 -sha256 -extensions v3_req \
- -CA ca.crt -CAkey ca.key -CAserial ca.srl -CAcreateserial \
- -in client.csr -out client.crt
-```
-
-## Configure mTLS for APISIX
-
-Load the content stored in `server.crt`, `server.key`, and `ca.crt` into shell variables:
-
-```shell
-server_cert=$(cat server.crt)
-server_key=$(cat server.key)
-ca_cert=$(cat ca.crt)
-```
-
-Create an SSL certificate object to save the server certificate, server certificate key, and CA certificate:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/ssls" -X PUT -d '
-{
- "id": "quickstart-mtls-client-ssl",
-# highlight-start
-// Annotate 1
- "sni": "test.com",
-// Annotate 2
- "cert": "'"${server_cert}"'",
-// Annotate 3
- "key": "'"${server_key}"'",
-# highlight-end
- "client": {
-# highlight-start
-// Annotate 4
- "ca": "'"${ca_cert}"'"
-# highlight-end
- }
-}'
-```
-
-❶ `sni`: `test.com`, the same as server certificate CN value
-
-❷ `cert`: server certificate `server.crt`
-
-❸ `key`: server certificate key `server.key`
-
-❹ `client.ca`: CA certificate `ca.crt`
-
-## Verify mTLS between Client and APISIX
-
-### With Client Certificate
-
-As the certificate is only valid for the CN `test.com`, you should use `test.com` as the domain name where APISIX is hosted.
-
-Send a request to `https://test.com:9443/ip` with client certificate and resolve `test.com` to `127.0.0.1`:
-
-```shell
-curl -ikv --resolve "test.com:9443:127.0.0.1" "https://test.com:9443/ip" \
- --cert client.crt --key client.key
-```
-
-An mTLS handshake similar to the following verifies the mTLS between client and APISIX is enabled:
-
-```text
-* Added test.com:9443:127.0.0.1 to DNS cache
-* Hostname test.com was found in DNS cache
-* Trying 127.0.0.1:9443...
-* Connected to test.com (127.0.0.1) port 9443 (#0)
-* ALPN, offering h2
-* ALPN, offering http/1.1
-* successfully set certificate verify locations:
-* CAfile: /etc/ssl/certs/ca-certificates.crt
-* CApath: /etc/ssl/certs
-* TLSv1.3 (OUT), TLS handshake, Client hello (1):
-* TLSv1.3 (IN), TLS handshake, Server hello (2):
-* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):
-# highlight-start
-* TLSv1.3 (IN), TLS handshake, Request CERT (13):
-* TLSv1.3 (IN), TLS handshake, Certificate (11):
-* TLSv1.3 (IN), TLS handshake, CERT verify (15):
-* TLSv1.3 (IN), TLS handshake, Finished (20):
-# highlight-end
-* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):
-# highlight-start
-* TLSv1.3 (OUT), TLS handshake, Certificate (11):
-* TLSv1.3 (OUT), TLS handshake, CERT verify (15):
-* TLSv1.3 (OUT), TLS handshake, Finished (20):
-# highlight-end
-* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
-* ALPN, server accepted to use h2
-* Server certificate:
-* subject: CN=test.com
-* start date: Apr 21 07:47:54 2023 GMT
-* expire date: Mar 28 07:47:54 2123 GMT
-* issuer: CN=ROOTCA
-* SSL certificate verify result: unable to get local issuer certificate (20), continuing anyway.
-* Using HTTP2, server supports multi-use
-* Connection state changed (HTTP/2 confirmed)
-* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0
-* Using Stream ID: 1 (easy handle 0x5625339a72e0)
-> GET /ip HTTP/2
-> Host: test.com:9443
-> user-agent: curl/7.74.0
-> accept: */*
->
-* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
-* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
-* old SSL session ID is stale, removing
-* Connection state changed (MAX_CONCURRENT_STREAMS == 128)!
-# highlight-start
-< HTTP/2 200
-HTTP/2 200
-# highlight-end
-...
-```
-
-Note that APISIX and the client successfully verified each other's certificate during the handshake and established a connection.
-
-### Without Client Certificate
-
-Send a request to `https://test.com:9443/ip` but without client certificate:
-
-```shell
-curl -ikv --resolve "test.com:9443:127.0.0.1" "https://test.com:9443/ip"
-```
-
-A failed mTLS handshake is similar to the following:
-
-```text
-* Added test.com:9443:127.0.0.1 to DNS cache
-* Hostname test.com was found in DNS cache
-* Trying 127.0.0.1:9443...
-* Connected to test.com (127.0.0.1) port 9443 (#0)
-* ALPN, offering h2
-* ALPN, offering http/1.1
-* successfully set certificate verify locations:
-* CAfile: /etc/ssl/certs/ca-certificates.crt
-* CApath: /etc/ssl/certs
-* TLSv1.3 (OUT), TLS handshake, Client hello (1):
-* TLSv1.3 (IN), TLS handshake, Server hello (2):
-* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):
-# highlight-start
-* TLSv1.3 (IN), TLS handshake, Request CERT (13):
-* TLSv1.3 (IN), TLS handshake, Certificate (11):
-* TLSv1.3 (IN), TLS handshake, CERT verify (15):
-* TLSv1.3 (IN), TLS handshake, Finished (20):
-# highlight-end
-* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):
-# highlight-start
-* TLSv1.3 (OUT), TLS handshake, Certificate (11):
-* TLSv1.3 (OUT), TLS handshake, Finished (20):
-# highlight-end
-* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
-* ALPN, server accepted to use h2
-* Server certificate:
-* subject: CN=test.com
-* start date: Apr 21 07:47:54 2023 GMT
-* expire date: Mar 28 07:47:54 2123 GMT
-* issuer: CN=ROOTCA
-* SSL certificate verify result: unable to get local issuer certificate (20), continuing anyway.
-* Using HTTP2, server supports multi-use
-* Connection state changed (HTTP/2 confirmed)
-* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0
-* Using Stream ID: 1 (easy handle 0x55f791e252e0)
-> GET /ip HTTP/2
-> Host: test.com:9443
-> user-agent: curl/7.74.0
-> accept: */*
->
-* TLSv1.3 (IN), TLS alert, unknown (628):
-* OpenSSL SSL_read: error:1409445C:SSL routines:ssl3_read_bytes:tlsv13 alert certificate required, errno 0
-* Failed receiving HTTP2 data
-* OpenSSL SSL_write: SSL_ERROR_ZERO_RETURN, errno 0
-* Failed sending HTTP2 data
-* Connection #0 to host test.com left intact
-```
-
-the handshake failed due to the lack of client certificate.
-
-## Next Steps
-
-You can learn more about mTLS and APISIX SSL object in [SSL Certificates](../../../key-concepts/ssl-certificates).
-
-APISIX also supports TLS connection between clients and APISIX. See [Configure HTTPS between client and APISIX](../tls-and-mtls/configure-https-between-client-and-apisix) for more details.
diff --git a/enterprise_versioned_docs/version-3.2.2/how-to-guide/traffic-management/tls-and-mtls/configure-upstream-https.md b/enterprise_versioned_docs/version-3.2.2/how-to-guide/traffic-management/tls-and-mtls/configure-upstream-https.md
deleted file mode 100644
index 14d9ce02..00000000
--- a/enterprise_versioned_docs/version-3.2.2/how-to-guide/traffic-management/tls-and-mtls/configure-upstream-https.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-title: Configure Upstream HTTPS
-slug: /how-to-guide/traffic-management/tls-and-mtls/configure-upstream-https
----
-
-_TLS (Transport Layer Security)_ is a cryptographic protocol designed to secure communication between two parties, such as a web browser and a web server. Services often require TLS if traffic between the API gateway and upstream services is not considered secure or private.
-
-This guide will show you how to configure TLS between APISIX and an upstream service.
-
-
-
-

-
-
-
-## Prerequisite(s)
-
-* Install [Docker](https://docs.docker.com/get-docker).
-* Install [cURL](https://curl.se/) to send requests to the services for validation.
-* Install and run APISIX, or follow the [Getting Started tutorial](../../../getting-started/) to start a new APISIX instance in Docker.
-
-## Create a Route With TLS Enabled
-
-Create a route to an example upstream [httpbin.org](https://httpbin.org) on its default HTTPS port `443`:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
-{
- "id": "quickstart-tls-upstream",
- "uri": "/ip",
- "upstream": {
-# highlight-start
-// Annotate 1
- "scheme": "https",
- "nodes": {
- // Annotate 2
- "httpbin.org:443":1
- },
-# highlight-end
- "type": "roundrobin"
- }
-}'
-```
-
-❶ Configure scheme as `https`
-
-❷ Configure port as `443`
-
-## Test TLS between APISIX and Upstream
-
-Send a request to the route:
-
-```shell
-curl -i -k "http://127.0.0.1:9080/ip"
-```
-
-An `HTTP/1.1 200 OK` response verifies that APISIX has successfully established connection and communicated with the upstream service over HTTPS.
-
-## Next Steps
-
-APISIX also supports TLS connection between clients and APISIX. See Enable Downstream TLS how-to guide for more details (coming soon).
-
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/how-to-guide/transformation/transcode-http-to-grpc.md b/enterprise_versioned_docs/version-3.2.2/how-to-guide/transformation/transcode-http-to-grpc.md
deleted file mode 100644
index 20f12499..00000000
--- a/enterprise_versioned_docs/version-3.2.2/how-to-guide/transformation/transcode-http-to-grpc.md
+++ /dev/null
@@ -1,156 +0,0 @@
----
-title: Transcode HTTP to gRPC
-slug: /how-to-guide/transformation/transcode-http-to-grpc
----
-
-[gRPC](https://grpc.io/) is an open-source high performance Remote Procedure Call (RPC) framework based on HTTP/2 protocol. It uses [protocol buffers (protobuf)](https://protobuf.dev/) as the interface description language (IDL).
-
-APISIX provides the capability to transform between HTTP and gRPC requests and responses, using the plugin `grpc-transcode` and [proto objects](../../background-information/key-concepts/protos.md).
-
-This guide will show you how to use the plugin `grpc-transcode` to transform RESTful HTTP requests to gRPC requests.
-
-
-
-

-
-
-
-## Prerequisite(s)
-
-* Install [Docker](https://docs.docker.com/get-docker/).
-* Install [cURL](https://curl.se/) to send requests to APISIX for validation.
-* Install [gRPCurl](https://github.com/fullstorydev/grpcurl) to send requests to gRPC services for validation.
-* Follow the [Getting Started tutorial](./../../getting-started/) to start a new APISIX instance in Docker.
-
-## Deploy an Example gRPC Server
-
-Start an [example gRPC server](https://github.com/api7/grpc_server_example) Docker instance `quickstart-grpc-example` on port `50051`:
-
-```shell
-docker run -d \
- --name quickstart-grpc-example \
- --network=apisix-quickstart-net \
- -p 50051:50051 \
- api7/grpc-server-example:1.0.2
-```
-
-This example gRPC server holds several services, such as `echo.EchoService`:
-
-```proto title="echo.proto"
-syntax = "proto3";
-
-package echo;
-
-service EchoService {
- rpc Echo (EchoMsg) returns (EchoMsg);
-}
-
-message EchoMsg {
- string msg = 1;
-}
-```
-
-In this example, `Echo` is a method in `EchoService` that accepts parameter type `string` defined in `EchoMsg`.
-
-Test the gRPC method `echo.EchoService.Echo` using gRPCurl:
-
-```shell
-grpcurl -plaintext -d '{"msg": "Hello"}' "127.0.0.1:50051" "echo.EchoService/Echo"
-```
-
-A response similar to the following verifies that the gRPC service is working:
-
-```text
-{
- "msg": "Hello"
-}
-```
-
-## Create a Proto Object to Store Protobuf File
-
-Store the protobuf file `echo.proto` as a proto object in APISIX with the id `quickstart-proto`:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/protos" -X PUT -d '
-{
- "id": "quickstart-proto",
- "content": "syntax = \"proto3\";
-
- package echo;
-
- service EchoService {
- rpc Echo (EchoMsg) returns (EchoMsg);
- }
-
- message EchoMsg {
- string msg = 1;
- }"
-}'
-```
-
-An `HTTP/1.1 201 OK` response verifies that the proto object is created successfully.
-
-## Enable `grpc-transcode` Plugin
-
-Create a route with id `quickstart-grpc` and enable the plugin `grpc-transcode`:
-
-```shell
-curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
-{
- "id": "quickstart-grpc",
- "methods": ["GET"],
- "uri": "/echo",
- "plugins": {
-# highlight-start
- "grpc-transcode": {
-// Annotate 1
- "proto_id": "quickstart-proto",
-// Annotate 2
- "service": "echo.EchoService",
-// Annotate 3
- "method": "Echo"
- }
-# highlight-end
- },
- "upstream": {
- "scheme": "grpc",
- "type": "roundrobin",
- "nodes": {
- "quickstart-grpc-example:50051": 1
- }
- }
-}'
-```
-
-❶ `proto_id`: the proto object which defines gRPC services
-
-❷ `service`: gRPC service `echo.EchoService` in use
-
-❸ `method`: gRPC method `Echo` in use
-
-An `HTTP/1.1 201 OK` response verifies that the route is created and the plugin `grpc-transcode` is enabled successfully.
-
-APISIX now transcodes the RESTful HTTP requests received at `/echo` route to proto requests and forwards them to the upstream gRPC server `quickstart-grpc-example` to invoke the method `echo.EchoService/Echo`. Once the gRPC server responds, APISIX transcodes the proto responses back to RESTful HTTP responses for clients.
-
-## Test gRPC Services in a RESTful Way
-
-Send an HTTP request to `/echo` with parameters defined in `EchoMsg`:
-
-```shell
-curl -i "http://127.0.0.1:9080/echo?msg=Hello"
-```
-
-A valid response similar to the following verifies that the plugin `grpc-transcode` works:
-
-```text
-{"msg":"Hello"}
-```
-
-## Next Steps
-
-Learn more about the `grpc-transcode` plugin in the plugin reference (coming soon).
-
-In addition to transcoding HTTP requests to gRPC requests, APISIX also supports [gRPC-Web](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md), a variation of the [native gRPC protocol](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md), with the APISIX plugin `grpc-web`.
-
-[//]:
-[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/introduction.md b/enterprise_versioned_docs/version-3.2.2/introduction.md
deleted file mode 100644
index 5020f70d..00000000
--- a/enterprise_versioned_docs/version-3.2.2/introduction.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: Introduction
-slug: /introduction
-tags:
- - API7 Enterprise
----
-
-## What Is API7 Enterprise?
-
-API gateway is an important component in microservices architecture, which serves as the core entry and exit point for traffic, handling and processing business-related requests. It can effectively address issues such as massive requests and malicious access to ensure business security and stability. API7 Enterprise is an enterprise-level API gateway product built on the top-level project of the Apache Software Foundation. It enables enterprises to conveniently manage, protect, and monitor APIs, and customize them according to their own business needs, adapting flexibly to different application scenarios.
-
-API7 Enterprise is designed to provide secure and reliable API solutions for enterprises, helping them quickly build stable and efficient API management platforms, and improve the performance and availability of their applications.
-
-## Key Features
-
-### API Lifecycle Management
-
-Provides out-of-the-box solutions, including powerful API gateway, developer portal, documentation, and mock testing, to manage the lifecycle of APIs in all aspects.
-
-### Identity Authentication
-
-Built-in role-based access control (RBAC), supports API docking with authentication systems such as Keycloak/OAuth/Okta/two-factor authentication.
-
-### Flexibility and Scalability
-
-Dynamic scaling up and down capabilities, supporting data plane and control plane modes to cope with system peak and burst traffic, in addition to the built-in 100+ plugins, also supports custom plugins.
-
-### Gray Release
-
-Supports more refined gray release and blue-green deployment control through request headers or query strings.
-
-### Observability
-
-Integrated with different metric, tracing, and logging systems, such as DataDog, Prometheus, Grafana, etc.
-
-### Multi-Layer Network
-
-Provides data protection and compliance for your clusters in different regions worldwide.
-
-### Developer Portal
-
-Quickly generate API documents and publish your APIs, allowing third-party developers to use your APIs.
diff --git a/enterprise_versioned_docs/version-3.2.2/introduction/api7-ee-architecture.md b/enterprise_versioned_docs/version-3.2.2/introduction/api7-ee-architecture.md
new file mode 100644
index 00000000..7f6402bc
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/introduction/api7-ee-architecture.md
@@ -0,0 +1,4 @@
+---
+title: Architecture
+slug: /introduction/api7-ee-architecture
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/introduction/api7-ee-vs-api7-cloud-apisix.md b/enterprise_versioned_docs/version-3.2.2/introduction/api7-ee-vs-api7-cloud-apisix.md
new file mode 100644
index 00000000..49e6f0bd
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/introduction/api7-ee-vs-api7-cloud-apisix.md
@@ -0,0 +1,4 @@
+---
+title: API7 EE vs API7 Cloud vs APISIX
+slug: /introduction/api7-ee-vs-api7-cloud-apisix
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/introduction/what-is-api7-ee.md b/enterprise_versioned_docs/version-3.2.2/introduction/what-is-api7-ee.md
new file mode 100644
index 00000000..4e9b152f
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/introduction/what-is-api7-ee.md
@@ -0,0 +1,4 @@
+---
+title: What is API7 Enterprise Edition
+slug: /introduction/what-is-api7-ee
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/key-concepts/consumers.md b/enterprise_versioned_docs/version-3.2.2/key-concepts/consumers.md
new file mode 100644
index 00000000..6b89c5db
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/key-concepts/consumers.md
@@ -0,0 +1,4 @@
+---
+title: Consumers
+slug: /key-concepts/consumers
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/key-concepts/gateway-instance.md b/enterprise_versioned_docs/version-3.2.2/key-concepts/gateway-instance.md
new file mode 100644
index 00000000..22925630
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/key-concepts/gateway-instance.md
@@ -0,0 +1,4 @@
+---
+title: Gateway Instance
+slug: /key-concepts/gateway-instance
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/plugins.md b/enterprise_versioned_docs/version-3.2.2/key-concepts/plugins.md
similarity index 55%
rename from enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/plugins.md
rename to enterprise_versioned_docs/version-3.2.2/key-concepts/plugins.md
index 7cc79c9e..c461fcf1 100644
--- a/enterprise_versioned_docs/version-3.2.2/background-information/key-concepts/plugins.md
+++ b/enterprise_versioned_docs/version-3.2.2/key-concepts/plugins.md
@@ -21,7 +21,7 @@ If existing APISIX plugins do not meet your needs, you can also write your own p
## Plugins Installation
-By default, most APISIX plugins are installed as outlined in the default [configuration file](../../reference/configuration-files.md#config-defaultyaml-and-configyaml):
+By default, most APISIX plugins are installed as outlined in the default configuration file.
```yaml
plugins:
@@ -35,7 +35,7 @@ plugins:
...
```
-If you would like to make adjustments to plugins installation, add the customized `plugins` configuration to `config.yaml`, which takes precedence over the `plugins` configuration in `config-default.yaml`, and [reload APISIX](../../reference/apisix-cli.md#apisix-reload) for changes to take effect.
+If you would like to make adjustments to plugins installation, add the customized `plugins` configuration to `config.yaml`, which takes precedence over the `plugins` configuration in `config-default.yaml`, and reload APISIX for changes to take effect.
## Plugins Execution Lifecycle
@@ -55,7 +55,7 @@ To learn more about phases for your custom plugins development, see the plugin d
In general, plugins are executed in the following order:
-1. Plugins in [global rules](./plugin-global-rules.md)
+1. Plugins in global rules
1. plugins in rewrite phase
2. plugins in access phase
@@ -63,7 +63,7 @@ In general, plugins are executed in the following order:
1. plugins in rewrite phase
2. plugins in access phase
-Within each [phase](#plugins-execution-lifecycle), you can optionally define a new priority value in the `_meta.priority` attribute of the plugin, which takes precedence over the default plugins priority during execution. Plugins with higher priority values are executed first. See plugin [common configurations](../../plugins/common-configurations.md#_metapriority) for an example.
+Within each [phase](#plugins-execution-lifecycle), you can optionally define a new priority value in the `_meta.priority` attribute of the plugin, which takes precedence over the default plugins priority during execution. Plugins with higher priority values are executed first. See plugin common configurations for an example.
## Plugins Merging Precedence
@@ -79,9 +79,7 @@ such that if the same plugin has different configurations in different objects,
By default, all plugins are triggered by incoming requests that match the configured rules in routes. However, in some cases, you may want more granular control over plugins execution; that is, conditionally determine which plugins are triggered for requests.
-APISIX allows for dynamic control over plugin execution by applying the `_meta.filter` configuration to the plugins. The configuration supports the evaluation of a wide range of [built-in variables](../../reference/built-in-variables.md) and [APISIX expressions](../../reference/apisix-expressions.md).
-
-See plugin [common configurations](../../plugins/common-configurations.md#_metafilter) for an example.
+APISIX allows for dynamic control over plugin execution by applying the `_meta.filter` configuration to the plugins. The configuration supports the evaluation of a wide range of built-in variables and APISIX expressions.
## Plugins Development
@@ -96,10 +94,61 @@ To learn more about developing plugins, see the plugin development how-to guide
[//]:
+## Plugin Global Rules
+
+In this section, you will learn the basic concept of plugin global rules in APISIX and why you may need them.
+
+### Overview
+
+In APISIX, a _global rule_ object is used to create [plugins](./plugins.md) that are triggered on every incoming request and executed before other plugins locally bound to objects, such as [routes](./routes.md), [services](./services.md), and [consumers](./consumers.md). Certain plugins, such as rate limiting and observability plugins, are frequently enabled globally in order to provide consistent and comprehensive protection for APIs.
+
+The following diagram illustrates an example of enabling key authentication plugin globally for all incoming requests, where `key-auth` plugin is configured in both a global rule and a consumer. The `proxy-rewrite` plugin is configured on a route to modify the request's [HTTP header](https://developer.mozilla.org/en-US/docs/Glossary/HTTP_header), for demonstrating [plugins execution order](./plugins.md#plugins-execution-order):
+
+
+
+

+
+
+
+This configuration ensures that only the authenticated requests are allowed to interact with the upstream service. If a request is sent to APISIX:
+
+* without any key or with a wrong key, the request is rejected.
+* with `global-key` but to a non-existent route, the request is authenticated but APISIX returns an error warning users that the route is not found.
+* with `global-key` to an existing route, the request is first authenticated, then the header of the request is modified by the plugin on the route, and finally the request is forwarded to the upstream service.
+
+The example above used two different plugins in a global rule and a route. If the same plugin is configured in both objects, both instances of the plugin will be [executed sequentially](./plugins.md#plugins-execution-order), rather than overwriting each other.
+
+## Plugin Metadata
+
+In this section, you will learn the basic concept of plugin metadata in APISIX and why you may need them.
+
+### Overview
+
+In APISIX, a _plugin metadata_ object is used to configure the common metadata field(s) of all plugin instances sharing the same plugin name. It is useful when a plugin is enabled across multiple objects and requires a universal update to their metadata fields.
+
+The following diagram illustrates the concept of plugin metadata using two instances of `syslog` plugins on two different routes, as well as a plugin metadata object setting a global `log_format` for the `syslog` plugin:
+
+
+
+
+

+
+
+
+
+Unless otherwise specified, the `log_format` on the plugin metadata object should apply the same log format uniformly to both `syslog` plugins. However, since the `syslog` plugin on the `/order` route has a different `log_format`, requests visiting this route will generate logs in the `log_format` specified by the plugin in the route.
+
+In general, if a field of a plugin is defined in both the plugin metadata and another object, such as a route, the definition on the other object **takes precedence** over the global definition in plugin metadata to provide a more granular level of control.
+
+Plugin metadata objects should only be used for plugins that have metadata fields. For more details on which plugins have metadata fields, please refer to the plugin reference guide (coming soon).
+
+[//]:
+
## Additional Resource(s)
-* Getting Started - [Configure Rate Limiting](../../getting-started/rate-limiting.md)
-* Reference - [Plugin Common Configurations](../../plugins/common-configurations.md)
+* Key Concepts
+ * [Consumers](./consumers.md)
+
[//]:
[//]:
[//]:
diff --git a/enterprise_versioned_docs/version-3.2.2/key-concepts/routes.md b/enterprise_versioned_docs/version-3.2.2/key-concepts/routes.md
new file mode 100644
index 00000000..eeb2611c
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/key-concepts/routes.md
@@ -0,0 +1,4 @@
+---
+title: Routes
+slug: /key-concepts/routes
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/key-concepts/services.md b/enterprise_versioned_docs/version-3.2.2/key-concepts/services.md
new file mode 100644
index 00000000..7cbc324f
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/key-concepts/services.md
@@ -0,0 +1,4 @@
+---
+title: Services
+slug: /key-concepts/services
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/key-concepts/ssl-certificates.md b/enterprise_versioned_docs/version-3.2.2/key-concepts/ssl-certificates.md
new file mode 100644
index 00000000..58f92a2c
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/key-concepts/ssl-certificates.md
@@ -0,0 +1,4 @@
+---
+title: SSL Certificates
+slug: /key-concepts/ssl-certificates
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/key-concepts/upstreams.md b/enterprise_versioned_docs/version-3.2.2/key-concepts/upstreams.md
new file mode 100644
index 00000000..ff9dd7a1
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/key-concepts/upstreams.md
@@ -0,0 +1,4 @@
+---
+title: Upstreams
+slug: /key-concepts/upstreams
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/plugins/common-configurations.md b/enterprise_versioned_docs/version-3.2.2/plugins/common-configurations.md
deleted file mode 100644
index 24f49a82..00000000
--- a/enterprise_versioned_docs/version-3.2.2/plugins/common-configurations.md
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: Common Configurations
-slug: /plugins/common-configurations
----
-
-Plugin common configurations are a set of configuration options that can be applied universally to all APISIX plugins through the `_meta` attribute. They can be used to configure:
-
-* [Plugin conditional execution](#_metafilter)
-* [Plugin execution priorities](#_metapriority)
-* [Plugin disablement](#_metadisable)
-* [Plugin error response](#_metaerror_response)
-
-## `_meta.filter`
-
-You can use `_meta.filter` to configure the conditional execution of plugins based on request parameters. Conditions are created with [APISIX expressions](../reference/apisix-expressions.md) and configured as an array. Plugin only executes when all conditions are met.
-
-For example, the following configuration sets a condition on the request's URI [query parameter](https://en.wikipedia.org/wiki/Query_string). Only requests with the URI query parameter `version` being `v2` will trigger the execution of `proxy-rewrite` plugin, which rewrites the request's URI path to `/api/v2` before forwarding it to the upstream:
-
-```json
-{
- ...,
- "plugins": {
- "proxy-rewrite": {
- "uri": "/api/v2",
- "_meta": {
- "filter": [
- ["arg_version", "==", "v2"]
- ]
- }
- }
- }
-}
-```
-
-Requests not meeting the condition will not have their URI paths rewritten and will be forwarded as-is.
-
-## `_meta.priority`
-
-You can use `_meta.priority` to adjust the execution order of **plugins of the same type** (i.e. global or non-global) **within a given phase**. Once defined, the value will take precedence over the default plugins priority defined in the [configuration file](../reference/configuration-files.md#config-defaultyaml-and-configyaml).
-
-Suppose two plugins that run in the same [phase](../background-information/key-concepts/plugins.md#plugins-execution-lifecycle), `limit-count` and `ip-restriction`, are configured on the same route. `limit-count` has a default priority of 1002 and `ip-restriction` has a default priority of 3000. When a request is sent to the route, `ip-restriction` is executed first as it has a higher default priority value. However, you can run `limit-count` before `ip-restriction` by assigning `_meta.priority` of `limit-count` a priority value higher than 3000, such as:
-
-```json
-{
- ...,
- "plugins": {
- "limit-count": {
- ...,
- "_meta": {
- "priority": 3010
- }
- }
- }
-}
-```
-
-To learn more about the execution order when global and non-global plugins are used together, see [plugins execution order](../background-information/key-concepts/plugins.md#plugins-execution-order).
-
-## `_meta.disable`
-
-You can use `_meta.disable` to disable a plugin without removing the plugin from the object it is bound to entirely.
-
-For example, you can disable the `proxy-rewrite` plugin with the following:
-
-```json
-{
- "plugins": {
- "proxy-rewrite": {
- "_meta": {
- "disable": true
- }
- }
- }
-}
-```
-
-## `_meta.error_response`
-
-You can use `_meta.error_response` to customize the error response returned by a plugin to a fixed value. This could be used to mitigate complications that may arise from the default error response in some cases.
-
-For example, you can customize the error response of the `limit-count` plugin:
-
-```json
-{
- "plugins": {
- "limit-count": {
- "count": 1,
- "time_window": 60,
- "_meta": {
- "error_response": {
- "message": "You have exceeded the rate limiting threshold."
- }
- }
- }
- }
-}
-```
-
-If more than one request is sent within the 60-second window to the route that the plugin binds to, you should see the following response:
-
-```text
-{"message":"You have exceeded the rate limiting threshold."}
-```
-
-## Differentiate From Plugin Metadata
-
-When working with plugins, it is important to understand the distinctions between the `_meta` common configurations, as outlined in this document, and the [plugin metadata](../background-information/key-concepts/plugin-metadata.md). These two concepts serve different purposes and should not be mixed.
-
-While the `_meta` common configurations refer to configuration options that are available for all APISIX plugins, plugin metadata only applies to plugins that have metadata attributes. These metadata attributes could also be configured with the Admin API plugin metadata resource.
-
-See [plugin metadata](../background-information/key-concepts/plugin-metadata.md) to learn more.
diff --git a/enterprise_versioned_docs/version-3.2.2/plugins/traffic-management/graphql-limit-count.md b/enterprise_versioned_docs/version-3.2.2/plugins/traffic-management/graphql-limit-count.md
deleted file mode 100644
index 4410b381..00000000
--- a/enterprise_versioned_docs/version-3.2.2/plugins/traffic-management/graphql-limit-count.md
+++ /dev/null
@@ -1,454 +0,0 @@
----
-title: graphql-limit-count
-slug: /plugins/graphql-limit-count
-sidebar_label: graphql-limit-count
-sidebar_position: 1
----
-
-import EnterpriseLabel from '@site/src/MDXComponents/EnterpriseLabel';
-
-# graphql-limit-count
-
-The `graphql-limit-count` plugin uses a fixed window algorithm to limit the rate of GraphQL requests based on the depth of the GraphQL [queries](https://graphql.org/learn/queries/) or [mutations](https://graphql.org/learn/queries#mutations).
-
-In GraphQL, the depth refers to the number of nesting levels in a query or mutation. The following is an example query with a depth of 3:
-
-```text
-{
- a {
- b {
- c
- }
- }
-}
-```
-
-The `graphql-limit-count` plugin rate limits by a quota of depth within a given time interval. For example, if the quota of count is set to 4 within a 30-second interval, requests with a depth of 3 will be allowed. The remaining quota within the same 30-second is 1. If a request of depth 2 is sent within the same 30-second interval, it will be rejected.
-
-## Attributes
-
-See plugin [common configurations](../common-configurations.md) for configuration options available to all plugins.
-
-| Name | Type | Required | Default | Valid Values | Description |
-| ------------------- | ------- | ---------- | ------------- | --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| count | `integer` | true | | > 0 | The maximum request depth allowed within a given time interval. |
-| time_window | `integer` | true | | > 0 | The time interval corresponding to the rate limiting `count` in seconds. |
-| key_type | `string` | false | "var" | ["var", "var_combination", "constant"] | The type of key. If it is `var`, the `key` is interpreted a variable. If it is `var_combination`, the `key` is interpreted as a combination of variables. If it is `constant`, the `key` is interpreted as a constant. |
-| key | `string` | false | "remote_addr" | | The key to count requests by. |
-| rejected_code | `integer` | false | 503 | [200,...,599] | The HTTP status code returned when a request is rejected for exceeding the threshold. |
-| rejected_msg | `string` | false | | non-empty | The response body returned when a request is rejected for exceeding the threshold. |
-| policy | `string` | false | "local" | ["local", "redis", "redis-cluster"] | The policy for rate limiting counter. If it is `local`, the counter is stored in memory locally. If it is `redis`, the counter is stored on a Redis instance. If it is `redis-cluster`, the counter is stored in a Redis cluster. |
-| allow_degradation | `boolean` | false | false | | If `true`, allow APISIX to continue handling requests without the plugin when the plugin or its dependencies become unavailable. |
-| show_limit_quota_header | `boolean` | false | true | | If `true`, include `X-RateLimit-Limit` to show the total quota and `X-RateLimit-Remaining` to show the remaining quota in the response header. |
-| group | `string` | false | | non-empty | The `group` ID for the plugin, such that routes of the same `group` can share the same rate limiting counter. |
-| redis_host | `string` | false | | | The address of the Redis node. Required when `policy` is `redis`. |
-| redis_port | `integer` | false | 6379 | [1,...] | The port of the Redis node when `policy` is `redis`. |
-| redis_password | `string` | false | | | The password of the Redis node when `policy` is `redis` or `redis-cluster`. |
-| redis_database | `integer` | false | 0 | >= 0 | The database number in Redis when `policy` is `redis`. |
-| redis_ssl | `boolean` | false | false | | If `true`, use SSL to connect to Redis cluster when `policy` is `redis`. |
-| redis_ssl_verify | `boolean` | false | false | | If `true`, verify the server SSL certificate when `policy` is `redis`. |
-| redis_timeout | `integer` | false | 1000 | [1,...] | The Redis timeout value in milliseconds when `policy` is `redis` or `redis-cluster`. |
-| redis_cluster_nodes | `array[string]` | false | | | The list of the Redis cluster nodes with at least two addresses. Required when `policy` is `redis-cluster`. |
-| redis_cluster_name | `string` | false | | | The name of the Redis cluster. Required when `policy` is `redis-cluster`. |
-| redis_cluster_ssl | `boolean` | false | false | | If `true`, use SSL to connect to Redis cluster when `policy` is `redis-cluster`. |
-| redis_cluster_ssl_verify | `boolean` | false | false | | If `true`, verify the server SSL certificate when `policy` is `redis-cluster`. |
-
-## Examples
-
-The examples below use [GitHub GraphQL API](https://docs.github.com/en/graphql) endpoint as an upstream and demonstrate how you can configure `graphql-limit-count` for different scenarios.
-
-To follow along, create a GitHub [personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) with the appropriate scopes for the resources you want to interact with.
-
-### Apply Rate Limiting by Remote Address
-
-The following example demonstrates the rate limiting of GraphQL requests by a single variable, `remote_addr`.
-
-Create a route with `graphql-limit-count` plugin that allows for a quota of depth 2 within a 30-second window per remote address:
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "uri": "/graphql",
- "plugins": {
- # highlight-start
- "graphql-limit-count": {
- "count": 2,
- "time_window": 30,
- "rejected_code": 429,
- "key_type": "var",
- "key": "remote_addr"
- }
- # highlight-end
- },
- "upstream": {
- "type": "roundrobin",
- "pass_host": "node",
- "scheme": "https",
- "nodes": {
- "api.github.com:443": 1
- }
- }
- }'
-```
-
-#### Verify With GraphQL Query
-
-Send a request with a GraphQL query of depth 2 to verify:
-
-```shell
-curl -i "http://127.0.0.1:9080/graphql" -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -d '{"query": "query {viewer{login}}"}'
-```
-
-You should see an `HTTP/1.1 200 OK` response with the corresponding response body.
-
-The request has consumed all the quota allowed for the time window. If you send the request again within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response, indicating the request surpasses the quota threshold.
-
-#### Verify With GraphQL Mutation
-
-You can also send a request with a GraphQL mutation of depth 3 to verify:
-
-```shell
-curl -i "http://127.0.0.1:9080/graphql" -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -d '{"query": "mutation AddReactionToIssue {addReaction(input:{subjectId:\"MDU6SXNzdWUyMzEzOTE1NTE=\",content:HOORAY}) {reaction {content} subject {id}}}"}'
-```
-
-You should see an `HTTP/1.1 429 Too Many Requests` response at any time, as depth 3 always surpasses the quota of depth 2.
-
-### Apply Rate Limiting by Remote Address and Consumer Name
-
-The following example demonstrates the rate limiting of GraphQL requests by a combination of variables, `remote_addr` and `consumer_name`. It allows for a quota of depth 2 within a 30-second window per remote address and for each [consumer](../../background-information/key-concepts/consumers.md).
-
-Create two consumers, `jane` and `john`, and enable [key authentication](../../getting-started/key-authentication.md):
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "username": "jane",
- "plugins": {
- "key-auth": {
- "key": "jane-key"
- }
- }
- }'
-```
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "username": "john",
- "plugins": {
- "key-auth": {
- "key": "john-key"
- }
- }
- }'
-```
-
-Create a route with `key-auth` and `graphql-limit-count` plugins:
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "uri": "/graphql",
- "plugins": {
- # highlight-start
- // Annotate 1
- "key-auth": {},
- "graphql-limit-count": {
- "count": 2,
- "time_window": 30,
- "rejected_code": 429,
- // Annotate 2
- "key_type": "var_combination",
- // Annotate 3
- "key": "$remote_addr $consumer_name"
- }
- # highlight-end
- },
- "upstream": {
- "type": "roundrobin",
- "pass_host": "node",
- "scheme": "https",
- "nodes": {
- "api.github.com:443": 1
- }
- }
- }'
-```
-
-❶ `key-auth`: enable key authentication on the route.
-
-❷ `key_type`: set to `var_combination` to interpret the `key` is as a combination of variables.
-
-❸ `key`: set to `$remote_addr $consumer_name` to apply rate limiting quota by remote address and consumer.
-
-Send a request with a GraphQL query of depth 2 as the consumer `jane`:
-
-```shell
-curl -i "http://127.0.0.1:9080/graphql" -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -H 'apikey: jane-key' \
- -d '{"query": "query {viewer{login}}"}'
-```
-
-You should see an `HTTP/1.1 200 OK` response with the corresponding response body.
-
-This request has consumed all the quota set for the time window. If you send the same request as the consumer `jane` within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response, indicating the request surpasses the quota threshold.
-
-Send the same request as the consumer `john` within the same 30-second time interval:
-
-```shell
-curl -i "http://127.0.0.1:9080/graphql" -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -H 'apikey: john-key' \
- -d '{"query": "query {viewer{login}}"}'
-```
-
-You should see an `HTTP/1.1 200 OK` response with the corresponding response body, indicating the request is not rate limited.
-
-Send the same request as the consumer `john` again within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response.
-
-This verifies the plugin rate limits by the combination of variables, `remote_addr` and `consumer_name`.
-
-### Share Quota Among Routes
-
-The following example demonstrates the sharing of GraphQL rate limiting quota Among multiple routes by configuring the `group` of the `graphql-limit-count` plugin.
-
-Note that the configurations of the `graphql-limit-count` plugin of the same `group` should be identical. To avoid update anomalies and repetitive configurations, you can create a [service](../../background-information/key-concepts/services.md) with `graphql-limit-count` plugin and upstream for routes to connect to.
-
-Create a service with an ID of `1`:
-
-```shell
-curl http://127.0.0.1:9180/apisix/admin/services/1 -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "plugins": {
- # highlight-start
- "graphql-limit-count": {
- "count": 2,
- "time_window": 30,
- "rejected_code": 429,
- "group": "srv1"
- }
- # highlight-end
- },
- "upstream": {
- "type": "roundrobin",
- "pass_host": "node",
- "scheme": "https",
- "nodes": {
- "api.github.com:443": 1
- }
- }
- }'
-```
-
-Create two routes and configure their `service_id` to be `1`, so that they share the same configurations for the plugin and upstream:
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- # highlight-start
- "service_id": "1",
- # highlight-end
- "uri": "/graphql1",
- "plugins": {
- "proxy-rewrite": {
- "uri": "/graphql"
- }
- }
- }'
-```
-
-```shell
-curl http://127.0.0.1:9180/apisix/admin/routes/2 -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- # highlight-start
- "service_id": "1",
- # highlight-end
- "uri": "/graphql2",
- "plugins": {
- "proxy-rewrite": {
- "uri": "/graphql"
- }
- }
- }'
-```
-
-:::note
-
-The `proxy-rewrite` plugin is used to rewrite the URI to `/graphql` so that requests are forwarded to the correct endpoint.
-
-[//]:
-
-:::
-
-Send a request with a GraphQL query of depth 2 to route `/graphql1`:
-
-```shell
-curl -i "http://127.0.0.1:9080/graphql1" -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -d '{"query": "query {viewer{login}}"}'
-```
-
-You should see an `HTTP/1.1 200 OK` response with the corresponding response body.
-
-Send the same query of depth 2 to route `/graphql2` within the same 30-second time interval:
-
-```shell
-curl -i "http://127.0.0.1:9080/graphql2" -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -d '{"query": "query {viewer{login}}"}'
-```
-
-You should receive an `HTTP/1.1 429 Too Many Requests` response, which verifies the two routes share the same rate limiting quota.
-
-### Share Quota Among APISIX Nodes with a Redis Server
-
-The following example demonstrates the rate limiting of GraphQL requests across multiple APISIX nodes with a Redis server, such that different APISIX nodes share the same rate limiting quota.
-
-On each APISIX instance, create a route with the following configurations. Adjust the address of the Admin API accordingly.
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "uri": "/graphql",
- "plugins": {
- # highlight-start
- "graphql-limit-count": {
- "count": 2,
- "time_window": 30,
- "rejected_code": 429,
- "key": "remote_addr",
- // Annotate 1
- "policy": "redis",
- // Annotate 2
- "redis_host": "192.168.xxx.xxx",
- // Annotate 3
- "redis_port": 6379,
- // Annotate 4
- "redis_password": "p@ssw0rd",
- // Annotate 5
- "redis_database": 1
- }
- # highlight-end
- },
- "upstream": {
- "type": "roundrobin",
- "pass_host": "node",
- "scheme": "https",
- "nodes": {
- "api.github.com:443": 1
- }
- }
- }'
-```
-
-❶ `policy`: set to `redis` to use a Redis instance for rate limiting.
-
-❷ `redis_host`: set to Redis instance IP address.
-
-❸ `redis_port`: set to Redis instance listening port.
-
-❹ `redis_password`: set to the password of the Redis instance, if any.
-
-❺ `redis_database`: set to the database number in the Redis instance.
-
-Send a request with a GraphQL query of depth 2 to an APISIX instance:
-
-```shell
-curl -i "http://127.0.0.1:9080/graphql" -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -d '{"query": "query {viewer{login}}"}'
-```
-
-You should see an `HTTP/1.1 200 OK` response with the corresponding response body.
-
-Send the same request to a different APISIX instance within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response, verifying routes configured in different APISIX nodes share the same quota.
-
-### Share Quota Among APISIX Nodes with a Redis Cluster
-
-You can also use a Redis cluster to apply the same quota across multiple APISIX nodes, such that different APISIX nodes share the same rate limiting quota.
-
-Ensure that your Redis instances are running in [cluster mode](https://redis.io/docs/management/scaling/#create-and-use-a-redis-cluster). A minimum of two nodes are required for the `graphql-limit-count` plugin configurations.
-
-On each APISIX instance, create a route with the following configurations. Adjust the address of the Admin API accordingly.
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "uri": "/graphql",
- "plugins": {
- # highlight-start
- "graphql-limit-count": {
- "count": 2,
- "time_window": 30,
- "rejected_code": 429,
- "key": "remote_addr",
- // Annotate 1
- "policy": "redis-cluster",
- // Annotate 2
- "redis_cluster_nodes": [
- "192.168.xxx.xxx:6379",
- "192.168.xxx.xxx:16379"
- ],
- // Annotate 3
- "redis_password": "p@ssw0rd",
- // Annotate 4
- "redis_cluster_name": "redis-cluster-1",
- // Annotate 5
- "redis_cluster_ssl": true
- }
- # highlight-end
- },
- "upstream": {
- "type": "roundrobin",
- "pass_host": "node",
- "scheme": "https",
- "nodes": {
- "api.github.com:443": 1
- }
- }
- }'
-```
-
-❶ `policy`: set to `redis-cluster` to use a Redis cluster for rate limiting.
-
-❷ `redis_cluster_nodes`: set to Redis node addresses in the Redis cluster.
-
-❸ `redis_password`: set to the password of the Redis cluster, if any.
-
-❹ `redis_cluster_name`: set to the Redis cluster name.
-
-➎ `redis_cluster_ssl`: enable SSL/TLS communication with Redis cluster.
-
-Send a request with a GraphQL query of depth 2 to an APISIX instance:
-
-```shell
-curl -i "http://127.0.0.1:9080/graphql" -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -d '{"query": "query {viewer{login}}"}'
-```
-
-You should see an `HTTP/1.1 200 OK` response with the corresponding response body.
-
-Send the same request to a different APISIX instance within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response, verifying routes configured in different APISIX nodes share the same quota.
diff --git a/enterprise_versioned_docs/version-3.2.2/plugins/traffic-management/graphql-proxy-cache.md b/enterprise_versioned_docs/version-3.2.2/plugins/traffic-management/graphql-proxy-cache.md
deleted file mode 100644
index 23d3dd20..00000000
--- a/enterprise_versioned_docs/version-3.2.2/plugins/traffic-management/graphql-proxy-cache.md
+++ /dev/null
@@ -1,217 +0,0 @@
----
-title: graphql-proxy-cache
-slug: /plugins/graphql-proxy-cache
-sidebar_label: graphql-proxy-cache
-sidebar_position: 1
----
-
-import EnterpriseLabel from '@site/src/MDXComponents/EnterpriseLabel';
-
-# graphql-proxy-cache
-
-The `graphql-proxy-cache` plugin provides the capability to cache responses for GraphQL queries. It uses [MD5](https://en.wikipedia.org/wiki/MD5) algorithm to generate cache key based on the plugin configurations and GraphQL queries. The plugin supports both disk-based and memory-based caching options to cache for [GET](https://graphql.org/learn/serving-over-http/#get-request) and [POST](https://graphql.org/learn/serving-over-http/#post-request) GraphQL requests.
-
-If a request contains a [mutation](https://graphql.org/learn/queries#mutations) operation, the plugin will not cache the data. Instead, it adds an `Apisix-Cache-Status: BYPASS` header to the response to show that the request bypasses the caching mechanism.
-
-## Attributes
-
-See plugin [common configurations](../common-configurations.md) for configuration options available to all plugins.
-
-| Name | Type | Required | Default | Valid Values | Description |
-| ------------------ | -------------- | ------ | ------------------------- | ------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
-| cache_strategy | `string` | false | disk | ["disk","memory"] | Caching strategy. Cache on disk or in memory. |
-| cache_zone | `string` | false | disk_cache_one | | Cache zone used with the caching strategy. The value should match one of the cache zones defined in the [configuration files](#static-configurations) and should correspond to the caching strategy. For example, when using the in-memory caching strategy, you should use an in-memory cache zone. |
-| cache_ttl | `integer` | false | 300 | >=1 | Cache time to live (TTL) in seconds when caching in memory. |
-
-
-
-:::info
-
-To adjust the TTL when caching on disk, update `cache_ttl` in the [configuration files](#static-configurations). The TTL value is evaluated in conjunction with the values in the response headers [`Cache-Control`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control) and [`Expires`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expires) received from the upstream service.
-
-:::
-
-## Static Configurations
-
-By default, values such as `cache_ttl` when caching on disk and cache `zones` are pre-configured in the default [configuration file](../../reference/configuration-files.md#config-defaultyaml-and-configyaml). For example:
-
-```yaml
-apisix:
- proxy_cache:
- cache_ttl: 10s # for caching on disk
- zones:
- - name: disk_cache_one
- memory_size: 50m
- disk_size: 1G
- disk_path: /tmp/disk_cache_one
- cache_levels: 1:2
- # - name: disk_cache_two
- # memory_size: 50m
- # disk_size: 1G
- # disk_path: "/tmp/disk_cache_two"
- # cache_levels: "1:2"
- - name: memory_cache
- memory_size: 50m
-```
-
-To customize, add the corresponding configurations to `config.yaml`, which takes precedence over the configurations in `config-default.yaml`. [Reload APISIX](../../reference/apisix-cli.md#apisix-reload) for changes to take effect.
-
-## Examples
-
-The examples below use [GitHub GraphQL API](https://docs.github.com/en/graphql) as an upstream and demonstrate how you can configure `graphql-proxy-cache` for different scenarios.
-
-To follow along, create a GitHub [personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) with the appropriate scopes for the resources you want to interact with.
-
-### Cache Data on Disk
-
-The following example demonstrates how you can use `graphql-proxy-cache` plugin on a route to cache data on disk.
-
-Create a route with the `graphql-proxy-cache` plugin with the default configuration to cache data on disk:
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "uri": "/graphql",
- "plugins": {
- # highlight-start
- "graphql-proxy-cache": {}
- # highlight-end
- },
- "upstream": {
- "type": "roundrobin",
- "pass_host": "node",
- "scheme": "https",
- "nodes": {
- "api.github.com:443": 1
- }
- }
- }'
-```
-
-Send a request with a GraphQL query to verify:
-
-```shell
-curl -i "http://127.0.0.1:9080/graphql" -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -d '{"query": "query {viewer{login}}"}'
-```
-
-You should see an `HTTP/1.1 200 OK` response with the following headers, showing the plugin is successfully enabled:
-
-```text
-APISIX-Cache-Key: e9c1624ee35f792548512ff9f6ff1bfa
-Apisix-Cache-Status: MISS
-```
-
-As there is no cache available before the first response, `Apisix-Cache-Status: MISS` is shown.
-
-Send the same request again within the cache TTL window. You should see an `HTTP/1.1 200 OK` response with the following headers, showing the cache is hit:
-
-```text
-APISIX-Cache-Key: e9c1624ee35f792548512ff9f6ff1bfa
-Apisix-Cache-Status: HIT
-```
-
-Wait for the cache to expire after the TTL and send the same request again. You should see an `HTTP/1.1 200 OK` response with the following headers, showing the cache has expired:
-
-```text
-APISIX-Cache-Key: e9c1624ee35f792548512ff9f6ff1bfa
-Apisix-Cache-Status: EXPIRED
-```
-
-### Cache Data in Memory
-
-The following example demonstrates how you can use `graphql-proxy-cache` plugin on a route to cache data in memory.
-
-Create a route with `graphql-proxy-cache` enabled and configure it use memory-based caching:
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "uri": "/graphql",
- "plugins": {
- # highlight-start
- "graphql-proxy-cache": {
- // Annotate 1
- "cache_strategy": "memory",
- // Annotate 2
- "cache_zone": "memory_cache",
- // Annotate 3
- "cache_ttl": 10
- }
- # highlight-end
- },
- "upstream": {
- "type": "roundrobin",
- "pass_host": "node",
- "scheme": "https",
- "nodes": {
- "api.github.com:443": 1
- }
- }
- }'
-```
-
-❶ `cache_strategy`: set to `memory` for in-memory setting.
-
-❷ `cache_zone`: set to the name of an in-memory cache zone.
-
-❸ `cache_ttl`: set the time to live for the in-memory cache.
-
-Send a request with a GraphQL query to verify:
-
-```shell
-curl "http://127.0.0.1:9080/graphql" -i -X POST \
- -H "Content-Type: application/json" \
- -H "Authorization: Bearer ${GH_ACCESS_TOKEN}" \
- -d '{"query": "query {viewer{login}}"}'
-```
-
-You should see an `HTTP/1.1 200 OK` response with the following headers, showing the plugin is successfully enabled:
-
-```text
-APISIX-Cache-Key: a661316c4b1b70ae2db5347743dec6b6
-Apisix-Cache-Status: MISS
-```
-
-As there is no cache available before the first response, `Apisix-Cache-Status: MISS` is shown.
-
-Send the same request again within the cache TTL window. You should see an `HTTP/1.1 200 OK` response with the following headers, showing the cache is hit:
-
-```text
-APISIX-Cache-Key: a661316c4b1b70ae2db5347743dec6b6
-Apisix-Cache-Status: HIT
-```
-
-### Remove Cache Manually
-
-While most of the time it is not necessary, there may be situations where you would want to manually remove cached data.
-
-The following example demonstrates how you can use the `public-api` plugin to expose the `/apisix/plugin/graphql-proxy-cache/{cache_strategy}/{route_id}/{key}` endpoint created by the `graphql-proxy-cache` plugin to manually remove cache.
-
-Create a route that matches the URI `/apisix/plugin/graphql-proxy-cache/*`:
-
-```shell
-curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
- -H "X-API-KEY: ${ADMIN_API_KEY}" \
- -d '{
- "id": "graphql-cache-purge",
- "uri": "/apisix/plugin/graphql-proxy-cache/*",
- "plugins": {
- "public-api": {}
- }
- }'
-```
-
-Send a PURGE request to remove data cached on disk for route `1`:
-
-```shell
-curl -i "http://127.0.0.1:9080/apisix/plugin/graphql-proxy-cache/disk/1/e9c1624ee35f792548512ff9f6ff1bfa" -X PURGE
-```
-
-An `HTTP/1.1 200 OK` response verifies that the cache corresponding to the key is successfully removed.
-
-If you send the same request again, you should see an `HTTP/1.1 404 Not Found` response, showing there is no cache on disk with this cache key after the cache removal.
diff --git a/enterprise_versioned_docs/version-3.2.2/plugins/traffic-management/traffic-label.md b/enterprise_versioned_docs/version-3.2.2/plugins/traffic-management/traffic-label.md
deleted file mode 100644
index 884faa0b..00000000
--- a/enterprise_versioned_docs/version-3.2.2/plugins/traffic-management/traffic-label.md
+++ /dev/null
@@ -1,316 +0,0 @@
----
-title: traffic-label
-slug: /plugins/traffic-label
-sidebar_label: traffic-label
-sidebar_position: 1
----
-
-import EnterpriseLabel from '@site/src/MDXComponents/EnterpriseLabel';
-
-# traffic-label
-
-The `traffic-label` plugin labels traffic based on user-defined rules and takes actions based on labels and the associated weights for actions. It provides a granular approach to traffic management, making it easy to conditionally action on requests with flexibility and precision.
-
-## Attributes
-
-See plugin [common configurations](../common-configurations.md) for configuration options available to all plugins.
-
-| Name | Type | Required | Default | Description |
-| ---- | ---- | --------- | ------- | ----------- |
-| rules | `array[object]` | true | | An array of one or more pairs of matching conditions and actions to be executed. |
-| rules.match | `array[array]` | true | | An array of one or more matching conditions in the form of [APISIX expressions](../../reference/apisix-expressions.md). |
-| rules.actions | `array[object]` | true | | An array of one or more actions to be executed when a condition is successfully matched. |
-| rules.actions.set_headers | `object` | false | | One or more request headers to apply to requests in the format of `{"name": "value", ...}`, where `value` could be a [built-in variable](../../reference/built-in-variables.md). If a header of the same name already exists, it will be overwritten. |
-| rules.actions.weight | `integer` | false | 1 | The weight of action distribution. See [action weight](#create-weighted-actions) for a detailed calculation. |
-
-:::info
-
-Rules are evaluated in sequential order. If the condition of a rule is matched, the associated actions will execute and the subsequent rules will be omitted. See [multiple matching rules](#define-multiple-matching-rules) for an example.
-
-:::
-
-## Examples
-
-The examples below demonstrate how you can configure `traffic-label` on a route in different scenarios.
-
-### Define a Single Matching Condition
-
-The following example demonstrates a simple rule with one matching condition and one associated action. If the URI of the request is `/headers`, the plugin will add the header `"X-Server-Id": "100"` to the request.
-
-```shell
-curl http://127.0.0.1:9180/apisix/admin/routes/1 \
--H 'X-API-KEY: ${ADMIN_API_KEY}' -X PUT -d '
-{
- "uri":"/headers",
- "plugins":{
- # highlight-start
- "traffic-label": {
- "rules": [
- {
- "match": [
- ["uri", "==", "/headers"]
- ],
- "actions": [
- {
- "set_headers": {
- "X-Server-Id": 100
- }
- }
- ]
- }
- ]
- }
- # highlight-end
- },
- "upstream":{
- "type":"roundrobin",
- "nodes":{
- "httpbin.org:80":1
- }
- }
-}'
-```
-
-Send a request to verify:
-
-```shell
-curl http://127.0.0.1:9080/headers
-```
-
-You should see a response similar to the following:
-
-```text
-{
- "headers": {
- "Accept": "*/*",
- ...
- "X-Server-Id": "100"
- }
-}
-```
-
-### Define Multiple Matching Conditions with Logical Operators
-
-You can build more complex matching conditions with [logical operators](../../reference/apisix-expressions.md#logical-operators).
-
-The following example demonstrates a rule with two matching conditions logically grouped by `OR` and one associated action. If one of the conditions is met, the plugin will add the header `"X-Server-Id": "100"` to the request.
-
-```shell
-curl http://127.0.0.1:9180/apisix/admin/routes/1 \
--H 'X-API-KEY: ${ADMIN_API_KEY}' -X PUT -d '
-{
- "uri":"/headers",
- "plugins":{
- # highlight-start
- "traffic-label": {
- "rules": [
- {
- "match": [
- "OR",
- ["arg_version", "==", "v1"],
- ["arg_env", "==", "dev"]
- ],
- "actions": [
- {
- "set_headers": {
- "X-Server-Id": 100
- }
- }
- ]
- }
- ]
- }
- # highlight-end
- },
- "upstream":{
- "type":"roundrobin",
- "nodes":{
- "httpbin.org:80":1
- }
- }
-}'
-```
-
-Send a request to verify:
-
-```shell
-curl http://127.0.0.1:9080/headers?env=dev
-```
-
-You should see a response similar to the following:
-
-```text
-{
- "headers": {
- "Accept": "*/*",
- ...
- "X-Server-Id": "100"
- }
-}
-```
-
-If you send a request that does not match any of the conditions, you will not see `"X-Server-Id": "100"` added to the request header.
-
-### Create Weighted Actions
-
-The following example demonstrates a rule with one matching condition and multiple weighted actions, where incoming requests are distributed proportionally based on the weights.
-
-If a `weight` is not associated with any action, this portion of the requests will not have any action performed on them.
-
-```shell
-curl http://127.0.0.1:9180/apisix/admin/routes/1 \
--H 'X-API-KEY: ${ADMIN_API_KEY}' -X PUT -d '
-{
- "uri":"/headers",
- "plugins":{
- # highlight-start
- "traffic-label": {
- "rules": [
- {
- "match": [
- ["uri", "==", "/headers"]
- ],
- "actions": [
- {
- "set_headers": {
- "X-Server-Id": 100
- },
- // Annotate 1
- "weight": 3
- },
- {
- "set_headers": {
- "X-API-Version": "v2"
- },
- // Annotate 2
- "weight": 2
- },
- {
- // Annotate 3
- "weight": 5
- }
- ]
- }
- ]
- }
- # highlight-end
- },
- "upstream":{
- "type":"roundrobin",
- "nodes":{
- "httpbin.org:80":1
- }
- }
-}'
-```
-
-The proportion of times each action is executed is determined by the weight of the action relative to the total weight of all actions listed under the `actions` field. Here, the total weight is calculated as the sum of all action weights: 3 + 2 + 5 = 10.
-
-Therefore:
-
-❶ 30% of the requests should have the `X-Server-Id: 100` request header.
-
-❷ 20% of the requests should have the `X-API-Version: v2` request header.
-
-❸ 50% of the requests should not have any action performed on them.
-
-Generate 50 consecutive requests to verify the weighted actions:
-
-```shell
-resp=$(seq 50 | xargs -I{} curl "http://127.0.0.1:9080/headers" -sL) && \
- count_w3=$(echo "$resp" | grep "X-Server-Id" | wc -l) && \
- count_w2=$(echo "$resp" | grep "X-API-Version" | wc -l) && \
- echo X-Server-Id: $count_w3, X-API-Version: $count_w2
-```
-
-The response shows that headers are added to requests in a weighted manner:
-
-```text
-X-Server-Id: 15, X-API-Version: 10
-```
-
-### Define Multiple Matching Rules
-
-The following example demonstrates the use of multiple rules, each with their matching condition and action.
-
-```shell
-curl http://127.0.0.1:9180/apisix/admin/routes/1 \
--H 'X-API-KEY: ${ADMIN_API_KEY}' -X PUT -d '
-{
- "uri":"/headers",
- "plugins":{
- # highlight-start
- "traffic-label": {
- "rules": [
- {
- "match": [
- ["arg_version", "==", "v1"]
- ],
- "actions": [
- {
- "set_headers": {
- "X-Server-Id": 100
- }
- }
- ]
- },
- {
- "match": [
- ["arg_version", "==", "v2"]
- ],
- "actions": [
- {
- "set_headers": {
- "X-Server-Id": 200
- }
- }
- ]
- }
- ]
- }
- # highlight-end
- },
- "upstream":{
- "type":"roundrobin",
- "nodes":{
- "httpbin.org:80":1
- }
- }
-}'
-```
-
-Send a request to `/headers?version=v1` to verify:
-
-```shell
-curl http://127.0.0.1:9080/headers?version=v1
-```
-
-You should see a response similar to the following:
-
-```text
-{
- "headers": {
- "Accept": "*/*",
- ...
- "X-Server-Id": "100"
- }
-}
-```
-
-Send a request to `/headers?version=v2` to verify:
-
-```shell
-curl http://127.0.0.1:9080/headers?version=v2
-```
-
-You should see a response similar to the following:
-
-```text
-{
- "headers": {
- "Accept": "*/*",
- ...
- "X-Server-Id": "200"
- }
-}
-```
diff --git a/enterprise_versioned_docs/version-3.2.2/reference/apisix-cli.md b/enterprise_versioned_docs/version-3.2.2/reference/apisix-cli.md
deleted file mode 100644
index b6143555..00000000
--- a/enterprise_versioned_docs/version-3.2.2/reference/apisix-cli.md
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: APISIX CLI
-slug: /reference/apisix-cli
----
-
-The APISIX CLI (Command Line Interface) is a tool that allows you to start, stop, and manage your APISIX instances.
-
-```bash
-apisix [action]
-```
-
-## Commands
-
-### `apisix help`
-
-Print the APISIX CLI help menu.
-
-### `apisix init`
-
-Initialize the `nginx.conf` configuration.
-
-### `apisix init_etcd`
-
-Initialize data in etcd.
-
-### `apisix start`
-
-Initialize and start the APISIX instance.
-
-### `apisix stop`
-
-Stop the running APISIX instance immediately. APISIX will stop all worker processes without waiting for them to finish serving any outstanding requests.
-
-### `apisix quit`
-
-Quit the running APISIX instance gracefully. APISIX will wait for all worker processes to finish serving any outstanding requests before stopping.
-
-### `apisix restart`
-
-Restart the APISIX instance. This command checks the generated `nginx.conf` configuration first before stopping and restarting APISIX.
-
-### `apisix reload`
-
-Reload the APISIX instance. Reinitialize `nginx.conf` and apply configuration changes without interrupting existing connections.
-
-### `apisix test`
-
-Test the generated `nginx.conf` to validate the configuration.
-
-### `apisix version`
-
-Print APISIX version.
diff --git a/enterprise_versioned_docs/version-3.2.2/reference/apisix-expressions.md b/enterprise_versioned_docs/version-3.2.2/reference/apisix-expressions.md
deleted file mode 100644
index 8f12d91e..00000000
--- a/enterprise_versioned_docs/version-3.2.2/reference/apisix-expressions.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-title: APISIX Expressions
-slug: /reference/apisix-expressions
----
-
-_APISIX Expressions_ are combinations of variables, operators, and values that can be evaluated to a result, such as a Boolean value, `true` or `false`. Expressions can be used in configurations for route matching, request filtering, selective plugin applications, log enrichment, and more.
-
-[//]:
-
-[//]:
-
-APISIX supports the evaluation of comparison operators and logical operators, as well as [regular expressions (RegEx)](https://www.pcre.org).
-
-## Comparison Operators
-
-APISIX supports the following comparison operators to be used with [built-in variables](./built-in-variables.md) in expressions:
-
-|**Operator**|**Description**|**Example**|
-|--------|-----------|-------|
-|`==` |equal |`["arg_version", "==", "v2"]`|
-|`~=` |not equal |`["arg_version", "~=", "v2"]`|
-|`>` |greater than|`["arg_ttl", ">", 3600]`|
-|`>=` |greater than or equal to|`["arg_ttl", ">=", 3600]`|
-|`<` |less than |`["arg_ttl", "<", 3600]`|
-|`<=` |less than or equal to|`["arg_ttl", "<=", 3600]`|
-|`~~` |match RegEx|`["arg_env", "~~", "[Dd]ev"]`|
-|`~*` |match RegEx (case-insensitive) |`["arg_env", "~~", "dev"]`|
-|`in` |exist in the right-hand side|`["arg_version", "in", ["v1","v2"]]`|
-|`has` |contain item in the right-hand side|`["graphql_root_fields", "has", "owner"]`|
-|`!` |reverse the adjacent operator|`["arg_env", "!", "~~", "[Dd]ev"]`|
-|`ipmatch` |match IP address|`["remote_addr", "ipmatch", ["192.168.102.40", "192.168.3.0/24"]]`|
-
-## Logical Operators
-
-APISIX supports the following logical operators:
-
-| **Operator** | **Explanation** |
-|---|---|
-| `AND` | `AND(A,B)` is true if both A and B are true. |
-| `OR` | `OR(A,B)` is true if either A or B is true. |
-| `!AND` | `!AND(A,B)` is true if either A or B is false. |
-| `!OR` | `!OR(A,B)` is true only if both A and B are false. |
-
-You can use logical operators to combine multiple expressions for evaluation, such as the following:
-
-```json
-[
- "AND",
- ["arg_version", "==", "v2"],
- [
- "OR",
- ["arg_action", "==", "signup"],
- ["arg_action", "==", "subscribe"]
- ]
-]
-```
diff --git a/enterprise_versioned_docs/version-3.2.2/reference/built-in-variables.md b/enterprise_versioned_docs/version-3.2.2/reference/built-in-variables.md
deleted file mode 100644
index bb2383ca..00000000
--- a/enterprise_versioned_docs/version-3.2.2/reference/built-in-variables.md
+++ /dev/null
@@ -1,76 +0,0 @@
----
-title: Built-In Variables
-slug: /reference/built-in-variables
----
-
-_Built-in variables_ in APISIX are pre-defined variables that can be directly referenced in configurations. The actual value of these variables will be replaced into the program at runtime, making APISIX configurations more descriptive and easier to manage.
-
-APISIX supports three types of built-in variables:
-
-* NGINX Variables
-* APISIX Variables
-* Custom Variables
-
-They are evaluated in a given order.
-
-## NGINX Variables
-
-APISIX supports NGINX variables in:
-
-1. Plugin configurations
-2. Custom logging formats in the configuration YAML files, similar to NGINX
-
-
-
-Some of the commonly used variables include:
-
-* `upstream_addr`
-* `remote_addr`
-* `request_uri`
-* `server_name`
-* `uri`
-* `http_user_agent`
-
-See the [complete list of NGINX variables](https://nginx.org/en/docs/varindex.html) for more information.
-
-[//]:
-
-## APISIX Variables
-
-In addition to [NGINX variables](https://nginx.org/en/docs/varindex.html), APISIX offers a variety of built-in variables to be used in plugins:
-
-| Variable Name | Description |
-|---------------------|-----------------------------------------------------------------------------------|
-| `balancer_ip` | Upstream server IP |
-| `balancer_port` | Upstream server port |
-| `consumer_name` | Consumer username |
-| `consumer_group_id` | Consumer group ID |
-| `graphql_name` | GraphQL [operation name](https://graphql.org/learn/queries/#operation-name) |
-| `graphql_operation` | GraphQL [operation type](https://graphql.org/learn/queries/#operation-name) |
-| `graphql_root_fields` | GraphQL [root fields](https://graphql.org/learn/execution/#root-fields-resolvers) |
-| `route_id` | Route ID |
-| `route_name` | Route name |
-| `service_id` | Service ID |
-| `service_name` | Service name |
-| `resp_body` | HTTP response body |
-| `mqtt_client_id` | Client ID in MQTT protocol |
-| `redis_cmd_line` | Redis command |
-| `rpc_time` | RPC request round-trip time |
-
-## Custom Variables
-
-You can create your own variables and use them as built-in variables in plugins.
-
-For more details on how to create custom variables, please refer to the plugins development guide (coming soon).
-
-## Evaluation Order
-
-APISIX evaluates variables in the given order:
-
-1. Custom Variables
-2. APISIX Variables
-3. NGINX Variables
-
-If a variable is successfully sourced in custom variables, APISIX will not continue to look in APISIX variables or NGINX variables.
-
-In other words, custom variables will **overwrite variables of the same names** defined in APISIX variables or NGINX variables, to better meet requirements of your specific use cases.
diff --git a/enterprise_versioned_docs/version-3.2.2/reference/configuration-files.md b/enterprise_versioned_docs/version-3.2.2/reference/configuration-files.md
deleted file mode 100644
index 8e0adf2f..00000000
--- a/enterprise_versioned_docs/version-3.2.2/reference/configuration-files.md
+++ /dev/null
@@ -1,83 +0,0 @@
----
-title: Configuration Files
-slug: /reference/configuration-files
----
-
-APISIX has the following configuration files under `/conf`:
-
-- `config-default.yaml`
-- `config.yaml`
-- `apisix.yaml`
-- `debug.yaml`
-
-This document provides a reference for how configuration files are used and how to manage configuration files by environments.
-
-## Usage
-
-### `config-default.yaml` and `config.yaml`
-
-APISIX comes with a default configuration file called `config-default.yaml` and a user-defined configuration file called `config.yaml`.
-
-The configurations in `config-default.conf` are used by default and **should not be modified**. It contains default configurations and comments for documentation:
-
-```yaml
-apisix:
- # node_listen: 9080 # APISIX listening port (single)
- node_listen: # APISIX listening ports (multiple)
- - 9080
- # - port: 9081
- # enable_http2: true # If not set, the default value is `false`.
- # - ip: 127.0.0.2
- # port: 9082
- # enable_http2: true
- enable_admin: true
- enable_dev_mode: false
- enable_reuseport: true
- ...
-```
-
-For a complete list of configuration options, see [`config-default.yaml`](https://github.com/apache/apisix/blob/master/conf/config-default.yaml).
-
-Custom configurations should be added to `config.yaml`, which take takes precedence over the configurations in `config-default.yaml`.
-
-APISIX loads these configuration files once at startup. If you make changes to these files, [reload APISIX](./apisix-cli.md#apisix-reload) for changes to take effect.
-
-### `apisix.yaml`
-
-In APISIX standalone deployment mode, `apisix.yaml` is used to configure APISIX resources, such as [routes](../background-information/key-concepts/routes.md), [upstreams](../background-information/key-concepts/upstreams.md), [consumers](../background-information/key-concepts/consumers.md), and others.
-
-These configurations are loaded by APISIX into memory at startup. Changes to this file do not require a reload of APISIX as the file is monitored for changes at a regular interval.
-
-For more information about how to configure `apisix.yaml`, see Standalone Mode (coming soon).
-
-[//]:
-
-### `debug.yaml`
-
-You can enable and customize APISIX debug mode using configuration options in `debug.yaml`.
-
-Changes to this file do not require a reload of APISIX as the file is monitored for changes at a regular interval.
-
-For more information about how to use debug mode, see Debug and Troubleshooting (coming soon).
-
-[//]:
-
-## Manage Configuration Files by Environments
-
-Keeping configuration files separate for different environments, such as development, staging, and production, can provide several benefits, including increased flexibility, improved security, and easier maintenance.
-
-APISIX supports separation of configuration files by environment. While the `config-default.yaml` file is always recognized as the default configuration, you can set the `APISIX_PROFILE` environment variable to determine which set of other configuration files APISIX should use.
-
-By default, when `APISIX_PROFILE` is not set, APISIX looks for the following configuration files:
-
-- `conf/config.yaml`
-- `conf/apisix.yaml`
-- `conf/debug.yaml`
-
-If the value of `APISIX_PROFILE` is set to `prod`, APISIX looks for the following configuration files:
-
-- `conf/config-prod.yaml`
-- `conf/apisix-prod.yaml`
-- `conf/debug-prod.yaml`
-
-You can set `APISIX_PROFILE` to any other value that matches your environment.
diff --git a/enterprise_versioned_docs/version-3.2.2/reference/reference.md b/enterprise_versioned_docs/version-3.2.2/reference/reference.md
new file mode 100644
index 00000000..7f98f3fd
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/reference/reference.md
@@ -0,0 +1,4 @@
+---
+title: API Reference
+slug: /reference/reference
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/security/block-sepc-ip-from-accessing-api.md b/enterprise_versioned_docs/version-3.2.2/security/block-sepc-ip-from-accessing-api.md
new file mode 100644
index 00000000..4d21976e
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/security/block-sepc-ip-from-accessing-api.md
@@ -0,0 +1,4 @@
+---
+title: Block Specific IP from Accessing API
+slug: /security/block-sepc-ip-from-accessing-api
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/security/enable-https-4-secure-client-api7-communication.md b/enterprise_versioned_docs/version-3.2.2/security/enable-https-4-secure-client-api7-communication.md
new file mode 100644
index 00000000..705af10b
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/security/enable-https-4-secure-client-api7-communication.md
@@ -0,0 +1,4 @@
+---
+title: Enable HTTPS/mTLS for Secure Client-API7 Communication
+slug: /security/enable-https-4-secure-client-api7-communication
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/security/fips-support.md b/enterprise_versioned_docs/version-3.2.2/security/fips-support.md
new file mode 100644
index 00000000..4e4fa411
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/security/fips-support.md
@@ -0,0 +1,4 @@
+---
+title: FIPS Support
+slug: /security/fips-support
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/security/manage-crossorigin-api-acces--with-cors.md b/enterprise_versioned_docs/version-3.2.2/security/manage-crossorigin-api-acces--with-cors.md
new file mode 100644
index 00000000..ef1fa20a
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/security/manage-crossorigin-api-acces--with-cors.md
@@ -0,0 +1,4 @@
+---
+title: Manage Cross-Origin API Access with CORS
+slug: /security/manage-crossorigin-api-acces--with-cors
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/security/role-based-access-control.md b/enterprise_versioned_docs/version-3.2.2/security/role-based-access-control.md
new file mode 100644
index 00000000..efc08d5f
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/security/role-based-access-control.md
@@ -0,0 +1,4 @@
+---
+title: Role-Based Access Control
+slug: /security/role-based-access-control
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/tools/cli-tools.md b/enterprise_versioned_docs/version-3.2.2/tools/cli-tools.md
new file mode 100644
index 00000000..09958b4a
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/tools/cli-tools.md
@@ -0,0 +1,4 @@
+---
+title: Cli Tools
+slug: /tools/cli-tools
+---
\ No newline at end of file
diff --git a/enterprise_versioned_docs/version-3.2.2/tools/declaractive-api.md b/enterprise_versioned_docs/version-3.2.2/tools/declaractive-api.md
new file mode 100644
index 00000000..166265b3
--- /dev/null
+++ b/enterprise_versioned_docs/version-3.2.2/tools/declaractive-api.md
@@ -0,0 +1,4 @@
+---
+title: Declarative API
+slug: /tools/declarative-api
+---
\ No newline at end of file
diff --git a/enterprise_versioned_sidebars/version-3.2.2-sidebars.json b/enterprise_versioned_sidebars/version-3.2.2-sidebars.json
index dc1e0057..3f432b37 100644
--- a/enterprise_versioned_sidebars/version-3.2.2-sidebars.json
+++ b/enterprise_versioned_sidebars/version-3.2.2-sidebars.json
@@ -1,137 +1,112 @@
{
"docs": [
{
- "type": "doc",
- "id": "introduction"
+ "type": "category",
+ "label": "概述",
+ "items": [
+ "introduction/what-is-api7-ee",
+ "introduction/api7-ee-architecture",
+ "introduction/api7-ee-vs-api7-cloud-apisix"
+ ]
},
{
"type": "category",
- "label": "入门",
+ "label": "概念",
"items": [
- "getting-started/get-apisix",
- "getting-started/configure-routes",
- "getting-started/load-balancing",
- "getting-started/key-authentication",
- "getting-started/rate-limiting"
+ "key-concepts/routes",
+ "key-concepts/upstreams",
+ "key-concepts/services",
+ "key-concepts/plugins",
+ "key-concepts/consumers",
+ "key-concepts/ssl-certificates",
+ "key-concepts/gateway-instance"
]
},
{
"type": "category",
- "label": "操作指南",
+ "label": "快速入门",
+ "items": [
+ "getting-started/install-api7-ee",
+ "getting-started/set-up-and-launch-init-api"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "API 生命周期管理",
"items": [
{
"type": "category",
- "label": "可观测",
- "items": [
- "how-to-guide/observability/monitor-apisix-with-prometheus",
- "how-to-guide/observability/log-with-clickhouse"
- ]
- },
- {
- "type": "category",
- "label": "安全",
+ "label": "API 设计",
"items": [
- {
- "type": "category",
- "label": "密钥管理",
- "items": [
- "how-to-guide/security/secrets-management/manage-secrets-in-hashicorp-vault"
- ]
- }
+ "api-full-lifecycle-management/api-design/export-openapi-sepc",
+ "api-full-lifecycle-management/api-design/plan-api-endpoints"
]
},
{
"type": "category",
- "label": "权限验证",
+ "label": "API 发布",
"items": [
- "how-to-guide/authentication/set-up-sso-with-oidc-and-keycloak"
+ "api-full-lifecycle-management/api-publish/apply-api-rate-limit-policies",
+ "api-full-lifecycle-management/api-publish/configure-api-upstream",
+ "api-full-lifecycle-management/api-publish/import-api-define-of-service",
+ "api-full-lifecycle-management/api-publish/set-up-api-auth",
+ "api-full-lifecycle-management/api-publish/transform-api-req-2-simplify-integration"
]
- },
+ },
{
"type": "category",
- "label": "请求转换",
+ "label": "API 消费",
"items": [
- "how-to-guide/transformation/transcode-http-to-grpc"
+ "api-full-lifecycle-management/api-consumption/allow-list-based-accesss-control-of-consumers",
+ "api-full-lifecycle-management/api-consumption/manage-api-consumer-credentials",
+ "api-full-lifecycle-management/api-consumption/publish-api-4-discovery-and-integration"
]
- },
+ },
{
"type": "category",
- "label": "流量管理",
+ "label": "API 运行时",
"items": [
- {
- "type": "category",
- "label": "TLS and mTLS",
- "items": [
- "how-to-guide/traffic-management/tls-and-mtls/configure-upstream-https",
- "how-to-guide/traffic-management/tls-and-mtls/configure-https-between-client-and-apisix",
- "how-to-guide/traffic-management/tls-and-mtls/configure-mtls-between-client-and-apisix"
- ]
- }
+ "api-full-lifecycle-management/api-runtime/log-api-traffic",
+ "api-full-lifecycle-management/api-runtime/monitor-api-metrics",
+ "api-full-lifecycle-management/api-runtime/trigger-alerts-of-unusual-api-activities"
]
}
]
},
{
"type": "category",
- "label": "背景信息",
+ "label": "安全",
"items": [
- {
- "type": "category",
- "label": "主要概念",
- "items": [
- "background-information/key-concepts/routes",
- "background-information/key-concepts/upstreams",
- "background-information/key-concepts/services",
- "background-information/key-concepts/plugins",
- "background-information/key-concepts/plugin-global-rules",
- "background-information/key-concepts/plugin-metadata",
- "background-information/key-concepts/consumers",
- "background-information/key-concepts/ssl-certificates",
- "background-information/key-concepts/secrets",
- "background-information/key-concepts/protos"
- ]
- }
+ "security/block-sepc-ip-from-accessing-api",
+ "security/enable-https-4-secure-client-api7-communication",
+ "security/fips-support",
+ "security/manage-crossorigin-api-acces--with-cors",
+      "security/role-based-access-control"
]
},
{
"type": "category",
- "label": "企业版",
+ "label": "管理",
"items": [
- "enterprise-edition/install-enterprise-trial",
- {
- "type": "category",
- "label": "功能",
- "items": [
- "enterprise-edition/features/rbac",
- "enterprise-edition/features/audit"
- ]
- }
+ "administration/active-and-renew-license",
+ "administration/manage-token-2-use-api7-ee-api",
+ "administration/track-user-actions-for-security-audition"
]
},
{
"type": "category",
- "label": "插件",
+ "label": "工具",
"items": [
- {
- "type": "category",
- "label": "流量管理",
- "items": [
- "plugins/traffic-management/traffic-label",
- "plugins/traffic-management/graphql-proxy-cache",
- "plugins/traffic-management/graphql-limit-count"
- ]
- }
+ "tools/cli-tools",
+ "tools/declaractive-api"
]
},
{
"type": "category",
- "label": "参考文档",
+ "label": "参考",
"items": [
- "reference/built-in-variables",
- "reference/apisix-expressions",
- "reference/configuration-files",
- "reference/apisix-cli"
+ "reference/reference"
]
}
]
-}
+}
\ No newline at end of file