diff --git a/.asf.yaml b/.asf.yaml
index 2fbe3776c084..ef0571a9aa86 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -52,7 +52,7 @@ github:
required_pull_request_reviews:
dismiss_stale_reviews: true
require_code_owner_reviews: true
- required_approving_review_count: 2
+ required_approving_review_count: 3
release/3.9:
required_pull_request_reviews:
require_code_owner_reviews: true
diff --git a/.github/workflows/centos7-ci.yml b/.github/workflows/centos7-ci.yml
deleted file mode 100644
index 177e9c8fb8d9..000000000000
--- a/.github/workflows/centos7-ci.yml
+++ /dev/null
@@ -1,181 +0,0 @@
-name: CI Centos7
-
-on:
- push:
- branches: [master, 'release/**']
- paths-ignore:
- - 'docs/**'
- - '**/*.md'
- pull_request:
- branches: [master, 'release/**']
- paths-ignore:
- - 'docs/**'
- - '**/*.md'
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
- cancel-in-progress: true
-
-permissions:
- contents: read
-
-jobs:
- test_apisix:
- name: run ci on centos7
- runs-on: ubuntu-20.04
- timeout-minutes: 90
- strategy:
- fail-fast: false
- matrix:
- events_module:
- - lua-resty-worker-events
- - lua-resty-events
- test_dir:
- - t/plugin/[a-k]*
- - t/plugin/[l-z]*
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/xds-library
-
- steps:
- - name: Check out code
- uses: actions/checkout@v4
- with:
- submodules: recursive
-
- - name: Cache deps
- uses: actions/cache@v4
- env:
- cache-name: cache-deps
- with:
- path: deps
- key: ${{ runner.os }}-${{ env.cache-name }}-centos7-${{ hashFiles('apisix-master-0.rockspec') }}
-
- - name: Extract branch name
- if: ${{ startsWith(github.ref, 'refs/heads/release/') }}
- id: branch_env
- shell: bash
- run: |
- echo "version=${GITHUB_REF##*/}" >>$GITHUB_OUTPUT
-
- - name: Extract test type
- shell: bash
- id: test_env
- run: |
- test_dir="${{ matrix.test_dir }}"
- if [[ $test_dir =~ 't/plugin' ]]; then
- echo "type=plugin" >>$GITHUB_OUTPUT
- fi
- if [[ $test_dir =~ 't/admin ' ]]; then
- echo "type=first" >>$GITHUB_OUTPUT
- fi
- if [[ $test_dir =~ ' t/xds-library' ]]; then
- echo "type=last" >>$GITHUB_OUTPUT
- fi
-
- - name: Free disk space
- run: |
- bash ./ci/free_disk_space.sh
-
- - name: Linux launch common services
- run: |
- make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
- sudo ./ci/init-common-test-service.sh
-
- - name: Build rpm package
- if: ${{ startsWith(github.ref, 'refs/heads/release/') }}
- run: |
- export VERSION=${{ steps.branch_env.outputs.version }}
- sudo gem install --no-document fpm
- git clone --depth 1 https://github.com/api7/apisix-build-tools.git
-
- # move codes under build tool
- mkdir ./apisix-build-tools/apisix
- for dir in `ls|grep -v "^apisix-build-tools$"`;do cp -r $dir ./apisix-build-tools/apisix/;done
-
- cd apisix-build-tools
- make package type=rpm app=apisix version=${VERSION} checkout=release/${VERSION} image_base=centos image_tag=7 local_code_path=./apisix
- cd ..
- rm -rf $(ls -1 -I apisix-build-tools -I t -I utils -I ci --ignore=Makefile -I "*.rockspec")
-
- - name: Start Dubbo Backend
- run: |
- cur_dir=$(pwd)
- sudo apt update
- sudo apt install -y maven
- cd t/lib/dubbo-backend
- mvn package
- cd dubbo-backend-provider/target
- java -Djava.net.preferIPv4Stack=true -jar dubbo-demo-provider.one-jar.jar > /tmp/java.log &
- cd $cur_dir/t/lib/dubbo-serialization-backend
- mvn package
- cd dubbo-serialization-backend-provider/target
- java -Djava.net.preferIPv4Stack=true -jar dubbo-demo-provider.one-jar.jar > /tmp/java2.log &
-
-
- - name: Build xDS library
- if: steps.test_env.outputs.type == 'last'
- run: |
- cd t/xds-library
- go build -o libxds.so -buildmode=c-shared main.go export.go
-
- - name: Run centos7 docker and mapping apisix into container
- env:
- TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
- TEST_EVENTS_MODULE: ${{ matrix.events_module }}
- run: |
- docker run -itd -v ${{ github.workspace }}:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --env TEST_EVENTS_MODULE="$TEST_EVENTS_MODULE" --name centos7Instance --net="host" --dns 8.8.8.8 --dns-search apache.org docker.io/centos:7 /bin/bash
- # docker exec centos7Instance bash -c "cp -r /tmp/apisix ./"
-
- - name: Cache images
- id: cache-images
- uses: actions/cache@v4
- env:
- cache-name: cache-apisix-docker-images
- with:
- path: docker-images-backup
- key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }}
-
- - if: ${{ steps.cache-images.outputs.cache-hit == 'true' }}
- name: Load saved docker images
- run: |
- if [[ -f docker-images-backup/apisix-images.tar ]]; then
- [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
- docker load --input docker-images-backup/apisix-images.tar
- rm docker-images-backup/apisix-images.tar
- make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
- echo "loaded docker images"
- if [[ ${{ steps.test_env.outputs.type }} != first ]]; then
- sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
- fi
- fi
- - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
- name: Linux launch services
- run: |
- [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
- [[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
- make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
- [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
- echo "Linux launch services, done."
-
- - name: Install dependencies
- run: |
- docker exec centos7Instance bash -c "cd apisix && ./ci/centos7-ci.sh install_dependencies"
-
- - name: Install rpm package
- if: ${{ startsWith(github.ref, 'refs/heads/release/') }}
- run: |
- docker exec centos7Instance bash -c "cd apisix && rpm -iv --prefix=/apisix ./apisix-build-tools/output/apisix-${{ steps.branch_env.outputs.version }}-0.el7.x86_64.rpm"
- # Dependencies are attached with rpm, so revert `make deps`
- docker exec centos7Instance bash -c "cd apisix && rm -rf deps"
- docker exec centos7Instance bash -c "cd apisix && mv usr/bin . && mv usr/local/apisix/* ."
-
- - name: Run test cases
- run: |
- docker exec centos7Instance bash -c "cd apisix && ./ci/centos7-ci.sh run_case"
-
- - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
- name: Save docker images
- run: |
- echo "start backing up, $(date)"
- bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }}
- echo "backup done, $(date)"
diff --git a/.github/workflows/chaos.yml.disabled b/.github/workflows/chaos.yml.disabled
deleted file mode 100644
index 7b47664c55e9..000000000000
--- a/.github/workflows/chaos.yml.disabled
+++ /dev/null
@@ -1,88 +0,0 @@
-name: Chaos Test
-
-on:
- pull_request:
- branches: [master, 'release/**']
- paths-ignore:
- - 'docs/**'
- - '**/*.md'
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
- cancel-in-progress: true
-
-permissions:
- contents: read
-
-jobs:
- chaos-test:
- runs-on: ubuntu-latest
- timeout-minutes: 35
- steps:
- - uses: actions/checkout@v4
- with:
- submodules: recursive
-
- - name: Setup go
- uses: actions/setup-go@v4
- with:
- go-version: "1.17"
-
- - uses: actions/cache@v3
- with:
- path: |
- ~/.cache/go-build
- ~/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
-
- - name: Creating minikube cluster
- run: |
- bash ./t/chaos/utils/setup_chaos_utils.sh start_minikube
- mkdir logs
- docker build -t apache/apisix:alpine-local --build-arg APISIX_PATH=. -f ./t/chaos/utils/Dockerfile .
- minikube cache add apache/apisix:alpine-local -v 7 --alsologtostderr
-
- - name: Print cluster information
- run: |
- kubectl config view
- kubectl cluster-info
- kubectl get nodes
- kubectl get pods -n kube-system
- kubectl version
-
- - name: Deploy etcd with Helm
- run: |
- curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
- helm repo add bitnami https://charts.bitnami.com/bitnami
- helm install etcd bitnami/etcd --set replicaCount=3 --set auth.rbac.create=false
- kubectl wait pods -l app.kubernetes.io/instance=etcd --for=condition=Ready --timeout=300s --all
-
-
- - name: Deploy APISIX
- run: |
- bash ./t/chaos/utils/setup_chaos_utils.sh modify_config
- kubectl create configmap apisix-gw-config.yaml --from-file=./conf/config.yaml
- kubectl apply -f ./t/chaos/kubernetes/deployment.yaml
- kubectl apply -f ./t/chaos/kubernetes/service.yaml
- kubectl wait pods -l app=apisix-gw --for=condition=Ready --timeout=300s \
- || (kubectl logs -l app=apisix-gw && exit 1)
- kubectl apply -f https://raw.githubusercontent.com/istio/istio/master/samples/httpbin/httpbin.yaml
- kubectl wait pods -l app=httpbin --for=condition=Ready --timeout=300s \
- || (kubectl logs -l app=httpbin && exit 1)
- bash ./t/chaos/utils/setup_chaos_utils.sh port_forward
-
- - name: Deploy Chaos Mesh
- run: |
- curl -sSL https://mirrors.chaos-mesh.org/v2.0.1/install.sh | bash
-
-
- - name: Install Ginkgo
- run: |
- go get -u github.com/onsi/ginkgo/ginkgo
- sudo cp ~/go/bin/ginkgo /usr/local/bin
-
- - name: Run test
- working-directory: ./t/chaos
- run: ginkgo -r --v --progress --trace
diff --git a/.github/workflows/fips.yml b/.github/workflows/fips.yml
deleted file mode 100644
index 2be0f3a1d911..000000000000
--- a/.github/workflows/fips.yml
+++ /dev/null
@@ -1,136 +0,0 @@
-name: CI FIPS
-
-on:
- push:
- branches: [master]
- paths-ignore:
- - 'docs/**'
- - '**/*.md'
- pull_request:
- branches: [master]
- paths-ignore:
- - 'docs/**'
- - '**/*.md'
-
-permissions:
- contents: read
-
-jobs:
- build:
- strategy:
- fail-fast: false
- matrix:
- platform:
- - ubuntu-20.04
- os_name:
- - linux_openresty
- test_dir:
- # all plugins only use three parts of openssl API: RSA via ffi, SHA via ffi and SSL API wrapped by nginx.
- # The latter one is already covered by the core tests, so no need to repeat it in plugin tests.
- # The RSA and SHA tests are fully covered by jwt-auth and hmac-auth plugin tests, while other plugins only repeat such tests.
- - t/plugin/jwt-auth2.t t/plugin/jwt-auth.t t/plugin/hmac-auth.t
- # all SSL related core tests are covered by below two lists.
- - t/admin/ssl* t/admin/schema.t t/admin/upstream.t t/config-center-yaml/ssl.t t/core/etcd-mtls.t t/core/config_etcd.t t/misc/patch.t
- - t/node/grpc-proxy-unary.t t/node/upstream-keepalive-pool.t t/node/upstream-websocket.t t/node/client-mtls.t t/node/upstream-mtls.t t/pubsub/kafka.t t/router/radixtree-sni2.t t/router/multi-ssl-certs.t t/router/radixtree-sni.t t/stream-node/mtls.t t/stream-node/tls.t t/stream-node/upstream-tls.t t/stream-node/sni.t
- - t/fips
-
- runs-on: ${{ matrix.platform }}
- timeout-minutes: 90
- env:
- SERVER_NAME: ${{ matrix.os_name }}
- OPENRESTY_VERSION: default
- ENABLE_FIPS: true
-
- steps:
- - name: Check out code
- uses: actions/checkout@v4
- with:
- submodules: recursive
-
- - name: Cache deps
- uses: actions/cache@v4
- env:
- cache-name: cache-deps
- with:
- path: deps
- key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-${{ hashFiles('apisix-master-0.rockspec') }}
-
- - name: Extract test type
- shell: bash
- id: test_env
- run: |
- test_dir="${{ matrix.test_dir }}"
- if [[ $test_dir =~ 't/plugin' ]]; then
- echo "type=plugin" >>$GITHUB_OUTPUT
- fi
- if [[ $test_dir =~ 't/fips' ]]; then
- echo "type=plugin" >>$GITHUB_OUTPUT
- fi
- if [[ $test_dir =~ 't/admin' ]]; then
- echo "type=first" >>$GITHUB_OUTPUT
- fi
- if [[ $test_dir =~ 't/node' ]]; then
- echo "type=last" >>$GITHUB_OUTPUT
- fi
-
- - name: Free disk space
- run: |
- bash ./ci/free_disk_space.sh
-
- - name: Linux launch common services
- run: |
- make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
- sudo ./ci/init-common-test-service.sh
-
- - name: Cache images
- id: cache-images
- uses: actions/cache@v4
- env:
- cache-name: cache-apisix-docker-images
- with:
- path: docker-images-backup
- key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }}
-
- - if: ${{ steps.cache-images.outputs.cache-hit == 'true' }}
- name: Load saved docker images
- run: |
- if [[ -f docker-images-backup/apisix-images.tar ]]; then
- [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
- docker load --input docker-images-backup/apisix-images.tar
- rm docker-images-backup/apisix-images.tar
- make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
- echo "loaded docker images"
- if [[ ${{ steps.test_env.outputs.type }} != first ]]; then
- sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
- fi
- fi
- - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
- name: Linux launch services
- run: |
- [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
- [[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
- make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
- echo "make ci-env-up, done"
- [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
- echo "Linux launch services, done"
-
- - name: Linux Before install
- run: sudo ./ci/${{ matrix.os_name }}_runner.sh before_install
-
- - name: Linux Install
- run: |
- sudo --preserve-env=ENABLE_FIPS \
- ./ci/${{ matrix.os_name }}_runner.sh do_install
-
- - name: Linux Script
- env:
- OPENSSL_FIPS: yes
- TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
- run: sudo -E ./ci/${{ matrix.os_name }}_runner.sh script
-
- - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
- name: Save docker images
- run: |
- echo "start backing up, $(date)"
- bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }}
- echo "backup done, $(date)"
diff --git a/.github/workflows/fuzzing-ci.yaml b/.github/workflows/fuzzing-ci.yaml
deleted file mode 100644
index bd88fd0ed95f..000000000000
--- a/.github/workflows/fuzzing-ci.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-name: fuzzing
-
-on:
- push:
- branches: [master, 'release/**']
- paths-ignore:
- - 'docs/**'
- - '**/*.md'
- pull_request:
- branches: [master, 'release/**']
- paths-ignore:
- - 'docs/**'
- - '**/*.md'
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
- cancel-in-progress: true
-
-permissions:
- contents: read
-
-jobs:
- test_apisix:
- name: run fuzzing
- runs-on: ubuntu-latest
- timeout-minutes: 30
-
- steps:
- - name: Check out code
- uses: actions/checkout@v4
- with:
- submodules: recursive
-
- - name: Cache deps
- uses: actions/cache@v4
- env:
- cache-name: cache-deps
- with:
- path: deps
- key: ${{ runner.os }}-${{ env.cache-name }}-${{ hashFiles('apisix-master-0.rockspec') }}
-
- - name: Linux launch common services
- run: |
- project_compose_ci=ci/pod/docker-compose.common.yml make ci-env-up
-
- - name: Linux Before install
- run: sudo ./ci/linux_openresty_runner.sh before_install
-
- - name: Linux Install
- run: |
- sudo --preserve-env=OPENRESTY_VERSION \
- ./ci/linux_openresty_runner.sh do_install
-
- - name: run apisix
- run: |
- source ./ci/common.sh
- export_version_info
- export_or_prefix
- make init
- make run
-
- - name: run upstream
- run: |
- source ./ci/common.sh
- export_version_info
- export_or_prefix
- sudo /usr/local/openresty/bin/openresty -c $PWD/t/fuzzing/upstream/nginx.conf
-
- - name: install boofuzz
- run: |
- # Avoid "ERROR: flask has requirement click>=8.0, but you'll have click 7.0 which is incompatible"
- sudo apt remove python3-click
- pip install -r $PWD/t/fuzzing/requirements.txt
-
- - name: run tests
- run: |
- source ./ci/common.sh
- export_version_info
- export_or_prefix
- export APISIX_FUZZING_PWD=$PWD
- python $PWD/t/fuzzing/simpleroute_test.py
- python $PWD/t/fuzzing/serverless_route_test.py
- python $PWD/t/fuzzing/vars_route_test.py
- python $PWD/t/fuzzing/client_abort.py
- python $PWD/t/fuzzing/simple_http.py
- python $PWD/t/fuzzing/http_upstream.py
diff --git a/.github/workflows/source-install.yml b/.github/workflows/source-install.yml
index b8c0dcb8e40b..62d192287cbc 100644
--- a/.github/workflows/source-install.yml
+++ b/.github/workflows/source-install.yml
@@ -27,7 +27,6 @@ jobs:
platform:
- ubuntu-20.04
os_platform:
- - centos7
- ubuntu
- redhat
services:
diff --git a/Makefile b/Makefile
index fa8b972df8bd..4c0ed68febbf 100644
--- a/Makefile
+++ b/Makefile
@@ -142,7 +142,11 @@ deps: install-runtime
### undeps : Uninstalling dependencies
.PHONY: undeps
-undeps: uninstall-runtime
+undeps: uninstall-rocks uninstall-runtime
+
+
+.PHONY: uninstall-rocks
+uninstall-rocks:
@$(call func_echo_status, "$@ -> [ Start ]")
$(ENV_LUAROCKS) purge --tree=deps
@$(call func_echo_success_status, "$@ -> [ Done ]")
diff --git a/README.md b/README.md
index befd5b101ce3..4cb9180e2034 100644
--- a/README.md
+++ b/README.md
@@ -42,6 +42,7 @@ The technical architecture of Apache APISIX:
## Community
+- [Kindly write a review](https://www.g2.com/products/apache-apisix/reviews) for APISIX on G2.
- Mailing List: Mail to dev-subscribe@apisix.apache.org, follow the reply to subscribe to the mailing list.
- Slack Workspace - [invitation link](https://apisix.apache.org/slack) (Please open an [issue](https://apisix.apache.org/docs/general/submit-issue) if this link is expired), and then join the #apisix channel (Channels -> Browse channels -> search for "apisix").
-  - follow and interact with us using hashtag `#ApacheAPISIX`
@@ -70,7 +71,8 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against
- Proxy Websocket
- Proxy Protocol
- HTTP(S) Forward Proxy
- - [SSL](docs/en/latest/certificate.md): Dynamically load an SSL certificate.
+ - [SSL](docs/en/latest/certificate.md): Dynamically load an SSL certificate
+ - [HTTP/3 with QUIC](docs/en/latest/http3.md)
- **Full Dynamic**
@@ -133,7 +135,7 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against
- [Global Rule](docs/en/latest/terminology/global-rule.md): Allows to run any plugin for all request, eg: limit rate, IP filter etc.
- High performance: The single-core QPS reaches 18k with an average delay of fewer than 0.2 milliseconds.
- [Fault Injection](docs/en/latest/plugins/fault-injection.md)
- - [REST Admin API](docs/en/latest/admin-api.md): Using the REST Admin API to control Apache APISIX, which only allows 127.0.0.1 access by default, you can modify the `allow_admin` field in `conf/config.yaml` to specify a list of IPs that are allowed to call the Admin API. Also, note that the Admin API uses key auth to verify the identity of the caller. **The `admin_key` field in `conf/config.yaml` needs to be modified before deployment to ensure security**.
+ - [REST Admin API](docs/en/latest/admin-api.md): Using the REST Admin API to control Apache APISIX, which only allows 127.0.0.1 access by default, you can modify the `allow_admin` field in `conf/config.yaml` to specify a list of IPs that are allowed to call the Admin API. Also, note that the Admin API uses key auth to verify the identity of the caller.
- External Loggers: Export access logs to external log management tools. ([HTTP Logger](docs/en/latest/plugins/http-logger.md), [TCP Logger](docs/en/latest/plugins/tcp-logger.md), [Kafka Logger](docs/en/latest/plugins/kafka-logger.md), [UDP Logger](docs/en/latest/plugins/udp-logger.md), [RocketMQ Logger](docs/en/latest/plugins/rocketmq-logger.md), [SkyWalking Logger](docs/en/latest/plugins/skywalking-logger.md), [Alibaba Cloud Logging(SLS)](docs/en/latest/plugins/sls-logger.md), [Google Cloud Logging](docs/en/latest/plugins/google-cloud-logging.md), [Splunk HEC Logging](docs/en/latest/plugins/splunk-hec-logging.md), [File Logger](docs/en/latest/plugins/file-logger.md), [SolarWinds Loggly Logging](docs/en/latest/plugins/loggly.md), [TencentCloud CLS](docs/en/latest/plugins/tencent-cloud-cls.md)).
- [ClickHouse](docs/en/latest/plugins/clickhouse-logger.md): push logs to ClickHouse.
- [Elasticsearch](docs/en/latest/plugins/elasticsearch-logger.md): push logs to Elasticsearch.
@@ -191,12 +193,6 @@ Using AWS's eight-core server, APISIX's QPS reaches 140,000 with a latency of on
[APISIX also works perfectly in AWS graviton3 C7g.](https://apisix.apache.org/blog/2022/06/07/installation-performance-test-of-apigateway-apisix-on-aws-graviton3)
-## Contributor Over Time
-
-> [visit here](https://www.apiseven.com/contributor-graph) to generate Contributor Over Time.
-
-[](https://www.apiseven.com/en/contributor-graph?repo=apache/apisix)
-
## User Stories
- [European eFactory Platform: API Security Gateway – Using APISIX in the eFactory Platform](https://www.efactory-project.eu/post/api-security-gateway-using-apisix-in-the-efactory-platform)
@@ -231,15 +227,6 @@ A wide variety of companies and organizations use APISIX API Gateway for researc
- XPENG
- Zoom
-## Landscape
-
-
-
-
-APISIX enriches the
-CNCF API Gateway Landscape.
-
-
## Logos
- [Apache APISIX logo(PNG)](https://github.com/apache/apisix/tree/master/logos/apache-apisix.png)
diff --git a/apisix-master-0.rockspec b/apisix-master-0.rockspec
index f94aed12749e..75b90d9497e3 100644
--- a/apisix-master-0.rockspec
+++ b/apisix-master-0.rockspec
@@ -48,10 +48,9 @@ dependencies = {
"lua-resty-session = 3.10",
"opentracing-openresty = 0.1",
"lua-resty-radixtree = 2.9.1",
- "lua-protobuf = 0.5.0-1",
+ "lua-protobuf = 0.5.2-1",
"lua-resty-openidc = 1.7.6-3",
"luafilesystem = 1.7.0-2",
- "api7-lua-tinyyaml = 0.4.4",
"nginx-lua-prometheus-api7 = 0.20240201-1",
"jsonschema = 0.9.8",
"lua-resty-ipmatcher = 0.6.1",
diff --git a/apisix/cli/file.lua b/apisix/cli/file.lua
index 88d0522a7e77..c01736d16180 100644
--- a/apisix/cli/file.lua
+++ b/apisix/cli/file.lua
@@ -15,7 +15,7 @@
-- limitations under the License.
--
-local yaml = require("tinyyaml")
+local yaml = require("lyaml")
local profile = require("apisix.core.profile")
local util = require("apisix.cli.util")
local dkjson = require("dkjson")
@@ -23,7 +23,6 @@ local dkjson = require("dkjson")
local pairs = pairs
local type = type
local tonumber = tonumber
-local getmetatable = getmetatable
local getenv = os.getenv
local str_gmatch = string.gmatch
local str_find = string.find
@@ -157,14 +156,6 @@ local function replace_by_reserved_env_vars(conf)
end
-local function tinyyaml_type(t)
- local mt = getmetatable(t)
- if mt then
- return mt.__type
- end
-end
-
-
local function path_is_multi_type(path, type_val)
if str_sub(path, 1, 14) == "nginx_config->" and
(type_val == "number" or type_val == "string") then
@@ -188,7 +179,7 @@ local function merge_conf(base, new_tab, ppath)
for key, val in pairs(new_tab) do
if type(val) == "table" then
- if tinyyaml_type(val) == "null" then
+ if val == yaml.null then
base[key] = nil
elseif tab_is_array(val) then
@@ -243,7 +234,7 @@ function _M.read_yaml_conf(apisix_home)
return nil, err
end
- local default_conf = yaml.parse(default_conf_yaml)
+ local default_conf = yaml.load(default_conf_yaml)
if not default_conf then
return nil, "invalid config-default.yaml file"
end
@@ -266,7 +257,7 @@ function _M.read_yaml_conf(apisix_home)
end
if not is_empty_file then
- local user_conf = yaml.parse(user_conf_yaml)
+ local user_conf = yaml.load(user_conf_yaml)
if not user_conf then
return nil, "invalid config.yaml file"
end
@@ -306,7 +297,7 @@ function _M.read_yaml_conf(apisix_home)
local apisix_conf_path = profile:yaml_path("apisix")
local apisix_conf_yaml, _ = util.read_file(apisix_conf_path)
if apisix_conf_yaml then
- local apisix_conf = yaml.parse(apisix_conf_yaml)
+ local apisix_conf = yaml.load(apisix_conf_yaml)
if apisix_conf then
local ok, err = resolve_conf_var(apisix_conf)
if not ok then
diff --git a/apisix/core/config_yaml.lua b/apisix/core/config_yaml.lua
index ce8c8321663a..218c8743bd7d 100644
--- a/apisix/core/config_yaml.lua
+++ b/apisix/core/config_yaml.lua
@@ -21,7 +21,7 @@
local config_local = require("apisix.core.config_local")
local config_util = require("apisix.core.config_util")
-local yaml = require("tinyyaml")
+local yaml = require("lyaml")
local log = require("apisix.core.log")
local json = require("apisix.core.json")
local new_tab = require("table.new")
@@ -100,7 +100,7 @@ local function read_apisix_yaml(premature, pre_mtime)
local yaml_config = f:read("*a")
f:close()
- local apisix_yaml_new = yaml.parse(yaml_config)
+ local apisix_yaml_new = yaml.load(yaml_config)
if not apisix_yaml_new then
log.error("failed to parse the content of file " .. apisix_yaml_path)
return
diff --git a/apisix/core/env.lua b/apisix/core/env.lua
index 2bab04327ba9..6a57a70edd15 100644
--- a/apisix/core/env.lua
+++ b/apisix/core/env.lua
@@ -87,6 +87,7 @@ end
function _M.fetch_by_uri(env_uri)
+ log.info("fetching data from env uri: ", env_uri)
local opts, err = parse_env_uri(env_uri)
if not opts then
return nil, err
diff --git a/apisix/core/request.lua b/apisix/core/request.lua
index e76bbf79a417..c5278b6b8072 100644
--- a/apisix/core/request.lua
+++ b/apisix/core/request.lua
@@ -144,6 +144,11 @@ local function modify_header(ctx, header_name, header_value, override)
req_add_header(header_name, header_value)
end
+ if ctx and ctx.var then
+ -- when the header is updated, clear cache of ctx.var
+ ctx.var["http_" .. str_lower(header_name)] = nil
+ end
+
if is_apisix_or and not changed then
-- if the headers are not changed before,
-- we can only update part of the cache instead of invalidating the whole
diff --git a/apisix/debug.lua b/apisix/debug.lua
index d1cb53d229db..588f02aca438 100644
--- a/apisix/debug.lua
+++ b/apisix/debug.lua
@@ -15,7 +15,7 @@
-- limitations under the License.
--
local require = require
-local yaml = require("tinyyaml")
+local yaml = require("lyaml")
local log = require("apisix.core.log")
local profile = require("apisix.core.profile")
local lfs = require("lfs")
@@ -130,7 +130,7 @@ local function read_debug_yaml()
local yaml_config = f:read("*a")
f:close()
- local debug_yaml_new = yaml.parse(yaml_config)
+ local debug_yaml_new = yaml.load(yaml_config)
if not debug_yaml_new then
log.error("failed to parse the content of file " .. debug_yaml_path)
return
diff --git a/apisix/plugins/datadog.lua b/apisix/plugins/datadog.lua
index 7f0ed8ab9ca0..972c0a2c7b9e 100644
--- a/apisix/plugins/datadog.lua
+++ b/apisix/plugins/datadog.lua
@@ -88,8 +88,8 @@ local function generate_tag(entry, const_tags)
core.table.insert(tags, "service_name:" .. entry.service_id)
end
- if entry.consumer and entry.consumer ~= "" then
- core.table.insert(tags, "consumer:" .. entry.consumer)
+ if entry.consumer and entry.consumer.username then
+ core.table.insert(tags, "consumer:" .. entry.consumer.username)
end
if entry.balancer_ip ~= "" then
core.table.insert(tags, "balancer_ip:" .. entry.balancer_ip)
diff --git a/apisix/plugins/jwt-auth.lua b/apisix/plugins/jwt-auth.lua
index 9fdc7a9b5b18..e2442e914aae 100644
--- a/apisix/plugins/jwt-auth.lua
+++ b/apisix/plugins/jwt-auth.lua
@@ -50,6 +50,10 @@ local schema = {
hide_credentials = {
type = "boolean",
default = false
+ },
+ key_claim_name = {
+ type = "string",
+ default = "key"
}
},
}
@@ -247,9 +251,9 @@ local function get_rsa_or_ecdsa_keypair(conf)
end
-local function get_real_payload(key, auth_conf, payload)
+local function get_real_payload(key, auth_conf, payload, key_claim_name)
local real_payload = {
- key = key,
+ [key_claim_name] = key,
exp = ngx_time() + auth_conf.exp
}
if payload then
@@ -261,7 +265,7 @@ local function get_real_payload(key, auth_conf, payload)
end
-local function sign_jwt_with_HS(key, consumer, payload)
+local function sign_jwt_with_HS(key, consumer, payload, key_claim_name)
local auth_secret, err = get_secret(consumer.auth_conf)
if not auth_secret then
core.log.error("failed to sign jwt, err: ", err)
@@ -274,7 +278,7 @@ local function sign_jwt_with_HS(key, consumer, payload)
typ = "JWT",
alg = consumer.auth_conf.algorithm
},
- payload = get_real_payload(key, consumer.auth_conf, payload)
+ payload = get_real_payload(key, consumer.auth_conf, payload, key_claim_name)
}
)
if not ok then
@@ -285,7 +289,7 @@ local function sign_jwt_with_HS(key, consumer, payload)
end
-local function sign_jwt_with_RS256_ES256(key, consumer, payload)
+local function sign_jwt_with_RS256_ES256(key, consumer, payload, key_claim_name)
local public_key, private_key, err = get_rsa_or_ecdsa_keypair(
consumer.auth_conf
)
@@ -304,7 +308,7 @@ local function sign_jwt_with_RS256_ES256(key, consumer, payload)
public_key,
}
},
- payload = get_real_payload(key, consumer.auth_conf, payload)
+ payload = get_real_payload(key, consumer.auth_conf, payload, key_claim_name)
}
)
if not ok then
@@ -348,9 +352,10 @@ function _M.rewrite(conf, ctx)
return 401, {message = "JWT token invalid"}
end
- local user_key = jwt_obj.payload and jwt_obj.payload.key
+ local key_claim_name = conf.key_claim_name
+ local user_key = jwt_obj.payload and jwt_obj.payload[key_claim_name]
if not user_key then
- return 401, {message = "missing user key in JWT token"}
+ return 401, {message = "missing " .. key_claim_name .. " claim in JWT token"}
end
local consumer_conf = consumer_mod.plugin(plugin_name)
@@ -395,6 +400,8 @@ local function gen_token()
local key = args.key
local payload = args.payload
+ local key_claim_name = args.key_claim_name or "key"
+
if payload then
payload = ngx.unescape_uri(payload)
end
@@ -415,7 +422,7 @@ local function gen_token()
core.log.info("consumer: ", core.json.delay_encode(consumer))
local sign_handler = algorithm_handler(consumer, true)
- local jwt_token = sign_handler(key, consumer, payload)
+ local jwt_token = sign_handler(key, consumer, payload, key_claim_name)
if jwt_token then
return core.response.exit(200, jwt_token)
end
diff --git a/apisix/plugins/key-auth.lua b/apisix/plugins/key-auth.lua
index f8cfddae0cf8..7196a13e6da0 100644
--- a/apisix/plugins/key-auth.lua
+++ b/apisix/plugins/key-auth.lua
@@ -77,7 +77,7 @@ function _M.rewrite(conf, ctx)
end
if not key then
- return 401, {message = "Missing API key found in request"}
+ return 401, {message = "Missing API key in request"}
end
local consumer_conf = consumer_mod.plugin(plugin_name)
diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua
index f3207444832f..2e289db58a0c 100644
--- a/apisix/schema_def.lua
+++ b/apisix/schema_def.lua
@@ -714,6 +714,12 @@ _M.consumer = {
_M.upstream = upstream_schema
+local secret_uri_schema = {
+ type = "string",
+ pattern = "^\\$(secret|env|ENV)://"
+}
+
+
_M.ssl = {
type = "object",
properties = {
@@ -729,14 +735,13 @@ _M.ssl = {
cert = {
oneOf = {
certificate_scheme,
- -- TODO: uniformly define the schema of secret_uri
- { type = "string", pattern = "^\\$(secret|env)://"}
+ secret_uri_schema
}
},
key = {
oneOf = {
private_key_schema,
- { type = "string", pattern = "^\\$(secret|env)://"}
+ secret_uri_schema
}
},
sni = {
@@ -753,11 +758,21 @@ _M.ssl = {
},
certs = {
type = "array",
- items = certificate_scheme,
+ items = {
+ oneOf = {
+ certificate_scheme,
+ secret_uri_schema
+ }
+ }
},
keys = {
type = "array",
- items = private_key_schema,
+ items = {
+ oneOf = {
+ private_key_schema,
+ secret_uri_schema
+ }
+ }
},
client = {
type = "object",
diff --git a/apisix/secret.lua b/apisix/secret.lua
index 6ba02768db80..60e575b929f0 100644
--- a/apisix/secret.lua
+++ b/apisix/secret.lua
@@ -135,6 +135,7 @@ end
local function fetch_by_uri(secret_uri)
+ core.log.info("fetching data from secret uri: ", secret_uri)
local opts, err = parse_secret_uri(secret_uri)
if not opts then
return nil, err
diff --git a/apisix/ssl.lua b/apisix/ssl.lua
index f3c5f9b2eff9..ad820822c06e 100644
--- a/apisix/ssl.lua
+++ b/apisix/ssl.lua
@@ -18,7 +18,9 @@ local core = require("apisix.core")
local secret = require("apisix.secret")
local ngx_ssl = require("ngx.ssl")
local ngx_ssl_client = require("ngx.ssl.clienthello")
+local ffi = require("ffi")
+local C = ffi.C
local ngx_encode_base64 = ngx.encode_base64
local ngx_decode_base64 = ngx.decode_base64
local aes = require("resty.aes")
@@ -28,6 +30,10 @@ local assert = assert
local type = type
local ipairs = ipairs
+ffi.cdef[[
+unsigned long ERR_peek_error(void);
+void ERR_clear_error(void);
+]]
local cert_cache = core.lrucache.new {
ttl = 3600, count = 1024,
@@ -155,6 +161,12 @@ local function aes_decrypt_pkey(origin, field)
if decrypted then
return decrypted
end
+
+ if C.ERR_peek_error() then
+ -- clean up the error queue of OpenSSL to prevent
+ -- normal requests from being interfered with.
+ C.ERR_clear_error()
+ end
end
return nil, "decrypt ssl key failed"
@@ -266,8 +278,8 @@ function _M.check_ssl_conf(in_dp, conf)
end
for i = 1, numcerts do
- if not secret.check_secret_uri(conf.cert[i]) and
- not secret.check_secret_uri(conf.key[i]) then
+ if not secret.check_secret_uri(conf.certs[i]) and
+ not secret.check_secret_uri(conf.keys[i]) then
local ok, err = validate(conf.certs[i], conf.keys[i])
if not ok then
diff --git a/conf/config-default.yaml b/conf/config-default.yaml
index 953fb1f098a3..225f41dbb116 100755
--- a/conf/config-default.yaml
+++ b/conf/config-default.yaml
@@ -53,7 +53,7 @@ apisix:
memory_size: 50m # Size of the memory to store the cache index.
disk_size: 1G # Size of the disk to store the cache data.
disk_path: /tmp/disk_cache_one # Path to the cache file for disk cache.
- cache_levels: 1:2 # Cache hierarchy levels of disk cache.
+ cache_levels: "1:2" # Cache hierarchy levels of disk cache.
# - name: disk_cache_two
# memory_size: 50m
# disk_size: 1G
@@ -73,7 +73,7 @@ apisix:
ssl: radixtree_sni # radixtree_sni: match route by SNI
# http is the default proxy mode. proxy_mode can be one of `http`, `stream`, or `http&stream`
- proxy_mode: http
+ proxy_mode: "http"
# stream_proxy: # TCP/UDP L4 proxy
# tcp:
# - addr: 9100 # Set the TCP proxy listening ports.
diff --git a/conf/debug.yaml b/conf/debug.yaml
index 23c8d51a4672..bf825628b087 100644
--- a/conf/debug.yaml
+++ b/conf/debug.yaml
@@ -15,20 +15,20 @@
# limitations under the License.
#
basic:
- enable: false
+ enable: false # Enable the basic debug mode.
http_filter:
- enable: false # enable or disable this feature
- enable_header_name: X-APISIX-Dynamic-Debug # the header name of dynamic enable
+ enable: false # Enable HTTP filter to dynamically apply advanced debug settings.
+ enable_header_name: X-APISIX-Dynamic-Debug # If the header is present in a request, apply the advanced debug settings.
hook_conf:
- enable: false # enable or disable this feature
- name: hook_phase # the name of module and function list
- log_level: warn # log level
- is_print_input_args: true # print the input arguments
- is_print_return_value: true # print the return value
+ enable: false # Enable hook debug trace to log the target module function's input arguments or returned values.
+ name: hook_phase # Name of module and function list.
+ log_level: warn # Severity level for input arguments and returned values in the error log.
+ is_print_input_args: true # Print the input arguments.
+ is_print_return_value: true # Print the return value.
-hook_phase: # module and function list, name: hook_phase
- apisix: # required module name
- - http_access_phase # function name
+hook_phase: # Name of module and function list.
+ apisix: # Required module name.
+ - http_access_phase # Required function names.
- http_header_filter_phase
- http_body_filter_phase
- http_log_phase
diff --git a/docs/en/latest/admin-api.md b/docs/en/latest/admin-api.md
index e2807622ea2e..d928e7a6936d 100644
--- a/docs/en/latest/admin-api.md
+++ b/docs/en/latest/admin-api.md
@@ -1204,8 +1204,8 @@ For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax)
| ------------ | -------- | ------------------------ | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ |
| cert | True | Certificate | HTTPS certificate. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | |
| key | True | Private key | HTTPS private key. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | |
-| certs | False | An array of certificates | Used for configuring multiple certificates for the same domain excluding the one provided in the `cert` field. | |
-| keys | False | An array of private keys | Private keys to pair with the `certs`. | |
+| certs | False | An array of certificates | Used for configuring multiple certificates for the same domain excluding the one provided in the `cert` field. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | |
+| keys | False | An array of private keys | Private keys to pair with the `certs`. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | |
| client.ca | False | Certificate | Sets the CA certificate that verifies the client. Requires OpenResty 1.19+. | |
| client.depth | False | Certificate | Sets the verification depth in client certificate chains. Defaults to 1. Requires OpenResty 1.19+. | |
| client.skip_mtls_uri_regex | False | An array of regular expressions, in PCRE format | Used to match URI, if matched, this request bypasses the client certificate checking, i.e. skip the MTLS. | ["/hello[0-9]+", "/foobar"] |
diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json
index 72e5a62b0861..cd6aeb94b444 100644
--- a/docs/en/latest/config.json
+++ b/docs/en/latest/config.json
@@ -217,7 +217,8 @@
"items": [
"plugins/dubbo-proxy",
"plugins/mqtt-proxy",
- "plugins/kafka-proxy"
+ "plugins/kafka-proxy",
+ "plugins/http-dubbo"
]
}
]
@@ -380,6 +381,10 @@
{
"type": "doc",
"id": "ssl-protocol"
+ },
+ {
+ "type": "doc",
+ "id": "http3"
}
]
},
diff --git a/docs/en/latest/deployment-modes.md b/docs/en/latest/deployment-modes.md
index 90ae0cded0ea..016473db2131 100644
--- a/docs/en/latest/deployment-modes.md
+++ b/docs/en/latest/deployment-modes.md
@@ -267,6 +267,26 @@ plugins:
#END
```
+### How to configure Plugin Configs
+
+```yml
+plugin_configs:
+ -
+ id: 1
+ plugins:
+ response-rewrite:
+ body: "hello\n"
+routes:
+ - id: 1
+ uri: /hello
+ plugin_config_id: 1
+ upstream:
+ nodes:
+ "127.0.0.1:1980": 1
+ type: roundrobin
+#END
+```
+
### How to enable SSL
```yml
diff --git a/docs/en/latest/http3.md b/docs/en/latest/http3.md
new file mode 100644
index 000000000000..f25a2e5db888
--- /dev/null
+++ b/docs/en/latest/http3.md
@@ -0,0 +1,186 @@
+---
+title: HTTP/3 Protocol
+---
+
+
+
+[HTTP/3](https://en.wikipedia.org/wiki/HTTP/3) is the third major version of the Hypertext Transfer Protocol (HTTP). Unlike its predecessors which rely on TCP, HTTP/3 is based on [QUIC (Quick UDP Internet Connections) protocol](https://en.wikipedia.org/wiki/QUIC). It brings several benefits that collectively result in reduced latency and improved performance:
+
+* enabling seamless transition between different network connections, such as switching from Wi-Fi to mobile data.
+* eliminating head-of-line blocking, so that a lost packet does not block all streams.
+* negotiating TLS versions at the same time as the TLS handshakes, allowing for faster connections.
+* providing encryption by default, ensuring that all data transmitted over an HTTP/3 connection is protected and confidential.
+* providing zero round-trip time (0-RTT) when communicating with servers that clients already established connections to.
+
+APISIX currently supports HTTP/3 connections between downstream clients and APISIX. HTTP/3 connections with upstream services are not yet supported, and contributions are welcomed.
+
+:::caution
+
+This feature is currently experimental and not recommended for production use.
+
+:::
+
+This document will show you how to configure APISIX to enable HTTP/3 connections between client and APISIX and document a few known issues.
+
+## Usage
+
+### Enable HTTP/3 in APISIX
+
+Enable HTTP/3 on port `9443` (or a different port) by adding the following configurations to APISIX's `config.yaml` configuration file:
+
+```yaml title="config.yaml"
+apisix:
+ ssl:
+ listen:
+ - port: 9443
+ enable_http3: true
+ ssl_protocols: TLSv1.3
+```
+
+:::info
+
+If you are deploying APISIX using Docker, make sure to allow UDP in the HTTP3 port, such as `-p 9443:9443/udp`.
+
+:::
+
+Then reload APISIX for configuration changes to take effect:
+
+```shell
+apisix reload
+```
+
+### Generate Certificates and Keys
+
+HTTP/3 requires TLS. You can leverage the purchased certificates or self-generate them, whichever is applicable.
+
+To self-generate, first generate the certificate authority (CA) key and certificate:
+
+```shell
+openssl genrsa -out ca.key 2048 && \
+ openssl req -new -sha256 -key ca.key -out ca.csr -subj "/CN=ROOTCA" && \
+ openssl x509 -req -days 36500 -sha256 -extensions v3_ca -signkey ca.key -in ca.csr -out ca.crt
+```
+
+Next, generate the key and certificate with a common name for APISIX, and sign with the CA certificate:
+
+```shell
+openssl genrsa -out server.key 2048 && \
+ openssl req -new -sha256 -key server.key -out server.csr -subj "/CN=test.com" && \
+ openssl x509 -req -days 36500 -sha256 -extensions v3_req \
+ -CA ca.crt -CAkey ca.key -CAserial ca.srl -CAcreateserial \
+ -in server.csr -out server.crt
+```
+
+### Configure HTTPS
+
+Optionally load the content stored in `server.crt` and `server.key` into shell variables:
+
+```shell
+server_cert=$(cat server.crt)
+server_key=$(cat server.key)
+```
+
+Create an SSL certificate object to save the server certificate and its key:
+
+```shell
+curl -i "http://127.0.0.1:9180/apisix/admin/ssls" -X PUT -d '
+{
+ "id": "quickstart-tls-client-ssl",
+ "sni": "test.com",
+ "cert": "'"${server_cert}"'",
+ "key": "'"${server_key}"'"
+}'
+```
+
+### Create a Route
+
+Create a sample route to `httpbin.org`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
+{
+ "id":"httpbin-route",
+ "uri":"/get",
+ "upstream": {
+ "type":"roundrobin",
+ "nodes": {
+ "httpbin.org:80": 1
+ }
+ }
+}'
+```
+
+### Verify HTTP/3 Connections
+
+Install [static-curl](https://github.com/stunnel/static-curl) or any other curl executable that has HTTP/3 support.
+
+Send a request to the route:
+
+```shell
+curl -kv --http3-only \
+ -H "Host: test.com" \
+ --resolve "test.com:9443:127.0.0.1" "https://test.com:9443/get"
+```
+
+You should receive an `HTTP/3 200` response similar to the following:
+
+```text
+* Added test.com:9443:127.0.0.1 to DNS cache
+* Hostname test.com was found in DNS cache
+* Trying 127.0.0.1:9443...
+* QUIC cipher selection: TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256
+* Skipped certificate verification
+* Connected to test.com (127.0.0.1) port 9443
+* using HTTP/3
+* [HTTP/3] [0] OPENED stream for https://test.com:9443/get
+* [HTTP/3] [0] [:method: GET]
+* [HTTP/3] [0] [:scheme: https]
+* [HTTP/3] [0] [:authority: test.com]
+* [HTTP/3] [0] [:path: /get]
+* [HTTP/3] [0] [user-agent: curl/8.7.1]
+* [HTTP/3] [0] [accept: */*]
+> GET /get HTTP/3
+> Host: test.com
+> User-Agent: curl/8.7.1
+> Accept: */*
+>
+* Request completely sent off
+< HTTP/3 200
+...
+{
+ "args": {},
+ "headers": {
+ "Accept": "*/*",
+ "Content-Length": "0",
+ "Host": "test.com",
+ "User-Agent": "curl/8.7.1",
+ "X-Amzn-Trace-Id": "Root=1-6656013a-27da6b6a34d98e3e79baaf5b",
+ "X-Forwarded-Host": "test.com"
+ },
+ "origin": "172.19.0.1, 123.40.79.456",
+ "url": "http://test.com/get"
+}
+* Connection #0 to host test.com left intact
+```
+
+## Known Issues
+
+- For APISIX-3.9, test cases of Tongsuo will fail because the Tongsuo does not support QUIC TLS.
+- APISIX-3.9 is based on NGINX-1.25.3 with vulnerabilities in HTTP/3 (CVE-2024-24989, CVE-2024-24990).
diff --git a/docs/en/latest/plugins/body-transformer.md b/docs/en/latest/plugins/body-transformer.md
index 0d903496f709..15df4dbc456d 100644
--- a/docs/en/latest/plugins/body-transformer.md
+++ b/docs/en/latest/plugins/body-transformer.md
@@ -118,8 +118,8 @@ For example, parse YAML to JSON yourself:
```
{%
- local yaml = require("tinyyaml")
- local body = yaml.parse(_body)
+ local yaml = require("lyaml")
+ local body = yaml.load(_body)
%}
{"foobar":"{{body.foobar.foo .. " " .. body.foobar.bar}}"}
```
diff --git a/docs/en/latest/plugins/http-dubbo.md b/docs/en/latest/plugins/http-dubbo.md
new file mode 100755
index 000000000000..f5500983559e
--- /dev/null
+++ b/docs/en/latest/plugins/http-dubbo.md
@@ -0,0 +1,128 @@
+---
+title: http-dubbo
+keywords:
+ - Apache APISIX
+ - API Gateway
+ - Plugin
+ - http-dubbo
+ - http to dubbo
+ - transcode
+description: This document contains information about the Apache APISIX http-dubbo Plugin.
+---
+
+
+
+## Description
+
+The `http-dubbo` plugin can transcode between http and Dubbo (Note: in
+Dubbo 2.x, the serialization type of the upstream service must be fastjson).
+
+## Attributes
+
+| Name | Type | Required | Default | Valid values | Description |
+|--------------------------|---------|----------|---------|--------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| service_name | string | True | | | Dubbo service name |
+| service_version | string | False | 0.0.0 | | Dubbo service version |
+| method | string | True | | | Dubbo service method name |
+| params_type_desc | string | True | | | Description of the Dubbo service method signature |
+| serialization_header_key | string | False | | | If `serialization_header_key` is set, the plugin will read this request header to determine if the body has already been serialized according to the Dubbo protocol. If the value of this request header is true, the plugin will not modify the body content and will directly consider it as Dubbo request parameters. If it is false, the developer is required to pass parameters in the format of Dubbo's generic invocation, and the plugin will handle serialization. Note: Due to differences in precision between Lua and Java, serialization by the plugin may lead to parameter precision discrepancies. |
+| serialized | boolean | False | false | [true, false] | Same as `serialization_header_key`. Priority is lower than `serialization_header_key`. |
+| connect_timeout | number | False | 6000 | | Upstream tcp connect timeout |
+| read_timeout | number | False | 6000 | | Upstream tcp read_timeout |
+| send_timeout | number | False | 6000 | | Upstream tcp send_timeout |
+
+## Enable Plugin
+
+The example below enables the `http-dubbo` Plugin on the specified Route:
+
+:::note
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+ "uri": "/TestService/testMethod",
+ "plugins": {
+ "http-dubbo": {
+ "method": "testMethod",
+ "params_type_desc": "Ljava/lang/Long;Ljava/lang/Integer;",
+ "serialized": true,
+ "service_name": "com.xxx.xxx.TestService",
+ "service_version": "0.0.0"
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:20880": 1
+ }
+ }
+}'
+```
+
+## Example usage
+
+Once you have configured the Plugin as shown above, you can make a request as shown below:
+
+```shell
+curl --location 'http://127.0.0.1:9080/TestService/testMethod' \
+--data '1
+2'
+```
+
+## How to Get `params_type_desc`
+
+```java
+Method[] declaredMethods = YourService.class.getDeclaredMethods();
+String params_type_desc = ReflectUtils.getDesc(Arrays.stream(declaredMethods).filter(it -> it.getName().equals("yourmethod")).findAny().get().getParameterTypes());
+
+// If there are method overloads, you need to find the method you want to expose.
+// ReflectUtils is a Dubbo implementation.
+```
+
+## How to Serialize JSON According to Dubbo Protocol
+
+To prevent loss of precision, we recommend using pre-serialized bodies for requests. The serialization rules for Dubbo's
+fastjson are as follows:
+
+- Convert each parameter to a JSON string using toJSONString.
+- Separate each parameter with a newline character `\n`.
+
+Some languages and libraries may produce unchanged results when calling toJSONString on strings or numbers. In such
+cases, you may need to manually handle some special cases. For example:
+
+- The string `abc"` needs to be encoded as `"abc\""`.
+- The string `123` needs to be encoded as `"123"`.
+
+When an abstract class, parent class, or generic type is used in the parameter signature but the
+argument requires a specific type, the serialized value must include the concrete type information.
+Refer to [WriteClassName](https://github.com/alibaba/fastjson/wiki/SerializerFeature_cn) for more details.
+
+## Delete Plugin
+
+To remove the `http-dubbo` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration.
+APISIX will automatically reload and you do not have to restart for this to take effect.
diff --git a/docs/en/latest/plugins/jwt-auth.md b/docs/en/latest/plugins/jwt-auth.md
index e44fd58a5880..b00cf146b13e 100644
--- a/docs/en/latest/plugins/jwt-auth.md
+++ b/docs/en/latest/plugins/jwt-auth.md
@@ -58,7 +58,8 @@ For Route:
| header | string | False | authorization | The header to get the token from. |
| query | string | False | jwt | The query string to get the token from. Lower priority than header. |
| cookie | string | False | jwt | The cookie to get the token from. Lower priority than query. |
-| hide_credentials | boolean | False | false | Set to true will not pass the authorization request of header\query\cookie to the Upstream.|
+| hide_credentials | boolean | False | false | Set to true will not pass the authorization request of header\query\cookie to the Upstream. |
+| key_claim_name | string | False | key | The name of the JWT claim that contains the user key (corresponds to Consumer's key attribute). |
You can implement `jwt-auth` with [HashiCorp Vault](https://www.vaultproject.io/) to store and fetch secrets and RSA keys pairs from its [encrypted KV engine](https://developer.hashicorp.com/vault/docs/secrets/kv) using the [APISIX Secret](../terminology/secret.md) resource.
diff --git a/docs/en/latest/plugins/key-auth.md b/docs/en/latest/plugins/key-auth.md
index 93705111d5bc..985036640cb0 100644
--- a/docs/en/latest/plugins/key-auth.md
+++ b/docs/en/latest/plugins/key-auth.md
@@ -141,7 +141,7 @@ curl http://127.0.0.2:9080/index.html -i
```
HTTP/1.1 401 Unauthorized
...
-{"message":"Missing API key found in request"}
+{"message":"Missing API key in request"}
```
```shell
diff --git a/docs/en/latest/plugins/opentelemetry.md b/docs/en/latest/plugins/opentelemetry.md
index eeaaf7253f67..9a0ed137350a 100644
--- a/docs/en/latest/plugins/opentelemetry.md
+++ b/docs/en/latest/plugins/opentelemetry.md
@@ -53,18 +53,26 @@ You can set up the collector by configuring it in you configuration file (`conf/
| Name | Type | Default | Description |
|--------------------------------------------|---------|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| trace_id_source | enum | random | Source of the trace ID. Valid values are `random` or `x-request-id`. When set to `x-request-id`, the value of the `x-request-id` header will be used as trace ID. Make sure that is matches the regex pattern `[0-9a-f]{32}`. |
+| trace_id_source | enum | x-request-id | Source of the trace ID. Valid values are `random` or `x-request-id`. When set to `x-request-id`, the value of the `x-request-id` header will be used as trace ID. Make sure that it matches the regex pattern `[0-9a-f]{32}`. |
| resource | object | | Additional [resource](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md) appended to the trace. |
| collector | object | {address = "127.0.0.1:4318", request_timeout = 3} | OpenTelemetry Collector configuration. |
| collector.address | string | 127.0.0.1:4318 | Collector address. If the collector serves on https, use https://127.0.0.1:4318 as the address. |
| collector.request_timeout | integer | 3 | Report request timeout in seconds. |
| collector.request_headers | object | | Report request HTTP headers. |
| batch_span_processor | object | | Trace span processor. |
-| batch_span_processor.drop_on_queue_full | boolean | true | When set to `true`, drops the span when queue is full. Otherwise, force process batches. |
-| batch_span_processor.max_queue_size | integer | 2048 | Maximum queue size for buffering spans for delayed processing. |
-| batch_span_processor.batch_timeout | number | 5 | Maximum time in seconds for constructing a batch. |
-| batch_span_processor.max_export_batch_size | integer | 256 | Maximum number of spans to process in a single batch. |
-| batch_span_processor.inactive_timeout | number | 2 | Time interval in seconds between processing batches. |
+| batch_span_processor.drop_on_queue_full | boolean | false | When set to `true`, drops the span when queue is full. Otherwise, force process batches. |
+| batch_span_processor.max_queue_size | integer | 1024 | Maximum queue size for buffering spans for delayed processing. |
+| batch_span_processor.batch_timeout | number | 2 | Maximum time in seconds for constructing a batch. |
+| batch_span_processor.max_export_batch_size | integer | 16 | Maximum number of spans to process in a single batch. |
+| batch_span_processor.inactive_timeout | number | 1 | Time interval in seconds between processing batches. |
+
+:::note
+
+If you find a `bad argument #1 to '?' (invalid value)` error triggered by the `hex2bytes` function in error log, it's essential to verify if your traceId matches the specified regex pattern `[0-9a-f]{32}`, as required by opentelemetry's [traceId format](https://opentelemetry.io/docs/specs/otel/trace/api/#retrieving-the-traceid-and-spanid).
+
+For instance, a possible scenario occurs when the plugin attribute `trace_id_source` is configured as `x-request-id`, and requests include an x-request-id header generated by Envoy. Envoy typically uses a [UUID](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/observability/tracing#trace-context-propagation) to create this header by default. When the opentelemetry plugin adopts this UUID as the traceId, the presence of hyphens in the UUID can cause issues. Since the UUID format with hyphens does not comply with the expected traceId format, it results in errors when attempting to push traces to the collector.
+
+:::
You can configure these as shown below:
diff --git a/docs/zh/latest/admin-api.md b/docs/zh/latest/admin-api.md
index 461771a95d72..5cefb428b0a0 100644
--- a/docs/zh/latest/admin-api.md
+++ b/docs/zh/latest/admin-api.md
@@ -1205,8 +1205,8 @@ SSL 资源请求地址:/apisix/admin/ssls/{id}
| ----------- | ------ | -------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------ |
| cert | 是 | 证书 | HTTP 证书。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | |
| key | 是 | 私钥 | HTTPS 证书私钥。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | |
-| certs | 否 | 证书字符串数组 | 当你想给同一个域名配置多个证书时,除了第一个证书需要通过 `cert` 传递外,剩下的证书可以通过该参数传递上来。 | |
-| keys | 否 | 私钥字符串数组 | `certs` 对应的证书私钥,需要与 `certs` 一一对应。 | |
+| certs | 否 | 证书字符串数组 | 当你想给同一个域名配置多个证书时,除了第一个证书需要通过 `cert` 传递外,剩下的证书可以通过该参数传递上来。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | |
+| keys | 否 | 私钥字符串数组 | `certs` 对应的证书私钥,需要与 `certs` 一一对应。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | |
| client.ca | 否 | 证书 | 设置将用于客户端证书校验的 `CA` 证书。该特性需要 OpenResty 为 1.19 及以上版本。 | |
| client.depth | 否 | 辅助 | 设置客户端证书校验的深度,默认为 1。该特性需要 OpenResty 为 1.19 及以上版本。 | |
| client.skip_mtls_uri_regex | 否 | PCRE 正则表达式数组 | 用来匹配请求的 URI,如果匹配,则该请求将绕过客户端证书的检查,也就是跳过 MTLS。 | ["/hello[0-9]+", "/foobar"] |
diff --git a/docs/zh/latest/config.json b/docs/zh/latest/config.json
index 3c3aa91a45f0..09d8130ff759 100644
--- a/docs/zh/latest/config.json
+++ b/docs/zh/latest/config.json
@@ -201,7 +201,8 @@
"label": "其它协议",
"items": [
"plugins/dubbo-proxy",
- "plugins/mqtt-proxy"
+ "plugins/mqtt-proxy",
+ "plugins/http-dubbo"
]
}
]
@@ -325,6 +326,10 @@
{
"type": "doc",
"id": "ssl-protocol"
+ },
+ {
+ "type": "doc",
+ "id": "http3"
}
]
},
diff --git a/docs/zh/latest/http3.md b/docs/zh/latest/http3.md
new file mode 100644
index 000000000000..5ea2f129137c
--- /dev/null
+++ b/docs/zh/latest/http3.md
@@ -0,0 +1,186 @@
+---
+title: HTTP3 协议
+---
+
+
+
+[HTTP/3](https://en.wikipedia.org/wiki/HTTP/3) 是 Hypertext Transfer Protocol(HTTP) 的第三个主要版本。与依赖 TCP 的前辈不同,HTTP/3 基于 [QUIC (Quick UDP Internet Connections) protocol](https://en.wikipedia.org/wiki/QUIC)。它带来了多项好处,减少了延迟并提高了性能:
+
+* 实现不同网络连接之间的无缝过渡,例如从 Wi-Fi 切换到移动数据。
+* 消除队头阻塞,以便丢失的数据包不会阻塞所有流。
+* 在 TLS 握手的同时协商 TLS 版本,从而实现更快的连接。
+* 默认提供加密,确保通过 HTTP/3 连接传输的所有数据都受到保护和保密。
+* 在与客户端已建立连接的服务器通信时提供零往返时间 (0-RTT)。
+
+APISIX 目前支持下游客户端和 APISIX 之间的 HTTP/3 连接。尚不支持与上游服务的 HTTP/3 连接。欢迎社区贡献。
+
+:::caution
+
+此功能尚未经过大规模测试,因此不建议用于生产使用。
+
+:::
+
+本文档将向您展示如何配置 APISIX 以在客户端和 APISIX 之间启用 HTTP/3 连接,并记录一些已知问题。
+
+## 使用示例
+
+### 启用 HTTP/3
+
+将以下配置添加到 APISIX 的配置文件。该配置将在端口 `9443`(或其他端口)上启用 HTTP/3:
+
+```yaml title="config.yaml"
+apisix:
+ ssl:
+ listen:
+ - port: 9443
+ enable_http3: true
+ ssl_protocols: TLSv1.3
+```
+
+:::info
+
+如果您使用 Docker 部署 APISIX,请确保在 HTTP3 端口中允许 UDP,例如 `-p 9443:9443/udp`。
+
+:::
+
+然后重新加载 APISIX 以使配置更改生效:
+
+```shell
+apisix reload
+```
+
+### 生成证书和密钥
+
+HTTP/3 需要 TLS。您可以利用购买的证书或自行生成证书。
+
+如自行生成,首先生成证书颁发机构 (CA) 密钥和证书:
+
+```shell
+openssl genrsa -out ca.key 2048 && \
+ openssl req -new -sha256 -key ca.key -out ca.csr -subj "/CN=ROOTCA" && \
+ openssl x509 -req -days 36500 -sha256 -extensions v3_ca -signkey ca.key -in ca.csr -out ca.crt
+```
+
+接下来,生成具有 APISIX 通用名称的密钥和证书,并使用 CA 证书进行签名:
+
+```shell
+openssl genrsa -out server.key 2048 && \
+ openssl req -new -sha256 -key server.key -out server.csr -subj "/CN=test.com" && \
+ openssl x509 -req -days 36500 -sha256 -extensions v3_req \
+ -CA ca.crt -CAkey ca.key -CAserial ca.srl -CAcreateserial \
+ -in server.csr -out server.crt
+```
+
+### 配置 HTTPS
+
+可选择性地将存储在 `server.crt` 和 `server.key` 中的内容加载到环境变量中:
+
+```shell
+server_cert=$(cat server.crt)
+server_key=$(cat server.key)
+```
+
+创建一个保存服务器证书及其密钥的 SSL 对象:
+
+```shell
+curl -i "http://127.0.0.1:9180/apisix/admin/ssls" -X PUT -d '
+{
+ "id": "quickstart-tls-client-ssl",
+ "sni": "test.com",
+ "cert": "'"${server_cert}"'",
+ "key": "'"${server_key}"'"
+}'
+```
+
+### 创建路由
+
+创建一个路由至 `httpbin.org`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d '
+{
+ "id":"httpbin-route",
+ "uri":"/get",
+ "upstream": {
+ "type":"roundrobin",
+ "nodes": {
+ "httpbin.org:80": 1
+ }
+ }
+}'
+```
+
+### 验证 HTTP/3 连接
+
+验证前需要安装支持 HTTP/3 的 curl,如 [static-curl](https://github.com/stunnel/static-curl) 或其他支持 HTTP/3 的 curl。
+
+发送一个请求到路由:
+
+```shell
+curl -kv --http3-only \
+ -H "Host: test.com" \
+ --resolve "test.com:9443:127.0.0.1" "https://test.com:9443/get"
+```
+
+应收到 `HTTP/3 200` 响应如下:
+
+```text
+* Added test.com:9443:127.0.0.1 to DNS cache
+* Hostname test.com was found in DNS cache
+* Trying 127.0.0.1:9443...
+* QUIC cipher selection: TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256
+* Skipped certificate verification
+* Connected to test.com (127.0.0.1) port 9443
+* using HTTP/3
+* [HTTP/3] [0] OPENED stream for https://test.com:9443/get
+* [HTTP/3] [0] [:method: GET]
+* [HTTP/3] [0] [:scheme: https]
+* [HTTP/3] [0] [:authority: test.com]
+* [HTTP/3] [0] [:path: /get]
+* [HTTP/3] [0] [user-agent: curl/8.7.1]
+* [HTTP/3] [0] [accept: */*]
+> GET /get HTTP/3
+> Host: test.com
+> User-Agent: curl/8.7.1
+> Accept: */*
+>
+* Request completely sent off
+< HTTP/3 200
+...
+{
+ "args": {},
+ "headers": {
+ "Accept": "*/*",
+ "Content-Length": "0",
+ "Host": "test.com",
+ "User-Agent": "curl/8.7.1",
+ "X-Amzn-Trace-Id": "Root=1-6656013a-27da6b6a34d98e3e79baaf5b",
+ "X-Forwarded-Host": "test.com"
+ },
+ "origin": "172.19.0.1, 123.40.79.456",
+ "url": "http://test.com/get"
+}
+* Connection #0 to host test.com left intact
+```
+
+## 已知问题
+
+- 对于 APISIX-3.9, Tongsuo 相关测试用例会失败,因为 Tongsuo 不支持 QUIC TLS。
+- APISIX-3.9 基于 NGINX-1.25.3,存在 HTTP/3 漏洞(CVE-2024-24989、CVE-2024-24990)。
diff --git a/docs/zh/latest/plugins/http-dubbo.md b/docs/zh/latest/plugins/http-dubbo.md
new file mode 100755
index 000000000000..0fb7e9f2e0e1
--- /dev/null
+++ b/docs/zh/latest/plugins/http-dubbo.md
@@ -0,0 +1,124 @@
+---
+title: http-dubbo
+keywords:
+ - Apache APISIX
+ - API 网关
+ - Plugin
+ - http-dubbo
+ - http to dubbo
+description: 本文介绍了关于 Apache APISIX `http-dubbo` 插件的基本信息及使用方法。
+---
+
+
+
+## 描述
+
+`http-dubbo` 插件可以将 http 请求 encode 为 dubbo 协议转发给上游服务(注意:在 dubbo2.x 时上游服务的序列化类型必须是 fastjson)
+
+## 属性
+
+| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
+| ------------------------ | ------- |-----| ------ | ----------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| service_name | string | 是 | | | dubbo 服务名 |
+| service_version | string | 否 | 0.0.0 | | dubbo 服务版本 默认 0.0.0 |
+| method | string | 是 | | | dubbo 服务方法名 |
+| params_type_desc | string | 否 | | | dubbo 服务方法签名描述,入参如果是 void 可不填写 |
+| serialization_header_key | string | 否 | | | 插件会读取该请求头判断 body 是否已经按照 dubbo 协议序列化完毕。如果该请求头的值为 true 则插件不会更改 body 内容,直接把他当作 dubbo 请求参数。如果为 false 则要求开发者按照 dubbo 泛化调用的格式传递参数,由插件进行序列化。注意:由于 lua 和 java 的插件序列化精度不同,可能会导致参数精度不同。 |
+| serialized | boolean | 否 | false | [true, false] | 和`serialization_header_key`一样。优先级低于`serialization_header_key` |
+| connect_timeout | number | 否 | 6000 | | 上游服务 tcp connect_timeout |
+| read_timeout | number | 否 | 6000 | | 上游服务 tcp read_timeout |
+| send_timeout | number | 否 | 6000 | | 上游服务 tcp send_timeout |
+
+## 启用插件
+
+以下示例展示了如何在指定路由中启用 `http-dubbo` 插件:
+
+:::note
+
+您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+ "uri": "/TestService/testMethod",
+ "plugins": {
+ "http-dubbo": {
+ "method": "testMethod",
+ "params_type_desc": "Ljava/lang/Long;Ljava/lang/Integer;",
+ "serialized": true,
+ "service_name": "com.xxx.xxx.TestService",
+ "service_version": "0.0.0"
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "127.0.0.1:1980": 1
+ }
+ }
+}'
+```
+
+## 测试插件
+
+通过上述命令启用插件后,可以使用如下命令测试插件是否启用成功:
+
+```shell
+curl --location 'http://127.0.0.1:9080/TestService/testMethod' \
+--data '1
+2'
+```
+
+## 如何获取 params_type_desc
+
+```java
+Method[] declaredMethods = YourService.class.getDeclaredMethods();
+String params_type_desc = ReflectUtils.getDesc(Arrays.stream(declaredMethods).filter(it->it.getName().equals("yourmethod")).findAny().get().getParameterTypes());
+
+//方法重载情况下需要找自己需要暴露的方法 ReflectUtils 为 dubbo 实现
+```
+
+## 如何按照 dubbo 协议使用 json 进行序列化
+
+为了防止精度丢失,我们推荐使用序列化好的 body 进行请求。
+dubbo 的 fastjson 序列化规则如下:
+
+- 每个参数之间使用 toJSONString 转化为 JSON 字符串
+
+- 每个参数之间使用换行符 `\n` 分隔
+
+部分语言和库在字符串或数字上调用 toJSONString 后结果是不变的,这可能需要你手动处理一些特殊情况,例如:
+
+- 字符串 `abc"` 需要被 encode 为 `"abc\""`
+
+- 字符串 `123` 需要被 encode 为 `"123"`
+
+抽象类,父类或者泛型作为入参签名,入参需要具体类型时。序列化需要写入具体的类型信息具体参考 [WriteClassName](https://github.com/alibaba/fastjson/wiki/SerializerFeature_cn)
+
+## 删除插件
+
+当你需要禁用 `http-dubbo` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务。
diff --git a/docs/zh/latest/plugins/key-auth.md b/docs/zh/latest/plugins/key-auth.md
index f45b514e30bd..13a08e7d94ee 100644
--- a/docs/zh/latest/plugins/key-auth.md
+++ b/docs/zh/latest/plugins/key-auth.md
@@ -148,7 +148,7 @@ curl http://127.0.0.2:9080/index.html -i
```shell
HTTP/1.1 401 Unauthorized
...
-{"message":"Missing API key found in request"}
+{"message":"Missing API key in request"}
```
```shell
diff --git a/docs/zh/latest/plugins/opentelemetry.md b/docs/zh/latest/plugins/opentelemetry.md
index 94dd63a3d44a..f6e322dbe77d 100644
--- a/docs/zh/latest/plugins/opentelemetry.md
+++ b/docs/zh/latest/plugins/opentelemetry.md
@@ -54,18 +54,26 @@ description: 本文介绍了关于 Apache APISIX `opentelemetry` 插件的基本
| 名称 | 类型 | 默认值 | 描述 |
| ------------------------------------------ | ------- | ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
-| trace_id_source | enum | random | trace ID 的来源。有效值为:`random` 或 `x-request-id`。当设置为 `x-request-id` 时,`x-request-id` 头的值将用作跟踪 ID。请确保当前请求 ID 是符合 TraceID 规范的:`[0-9a-f]{32}`。 |
+| trace_id_source | enum | x-request-id | trace ID 的来源。有效值为:`random` 或 `x-request-id`。当设置为 `x-request-id` 时,`x-request-id` 头的值将用作跟踪 ID。请确保当前请求 ID 是符合 TraceID 规范的:`[0-9a-f]{32}`。 |
| resource | object | | 追加到 trace 的额外 [resource](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md)。 |
| collector | object | {address = "127.0.0.1:4318", request_timeout = 3} | OpenTelemetry Collector 配置。 |
| collector.address | string | 127.0.0.1:4318 | 数据采集服务的地址。如果数据采集服务使用的是 HTTPS 协议,可以将 address 设置为 https://127.0.0.1:4318。 |
| collector.request_timeout | integer | 3 | 数据采集服务上报请求超时时长,单位为秒。 |
| collector.request_headers | object | | 数据采集服务上报请求附加的 HTTP 请求头。 |
| batch_span_processor | object | | trace span 处理器参数配置。 |
-| batch_span_processor.drop_on_queue_full | boolean | true | 如果设置为 `true` 时,则在队列排满时删除 span。否则,强制处理批次。|
-| batch_span_processor.max_queue_size | integer | 2048 | 处理器缓存队列容量的最大值。 |
-| batch_span_processor.batch_timeout | number | 5 | 构造一批 span 超时时间,单位为秒。 |
-| batch_span_processor.max_export_batch_size | integer | 256 | 单个批次中要处理的 span 数量。 |
-| batch_span_processor.inactive_timeout | number | 2 | 两个处理批次之间的时间间隔,单位为秒。 |
+| batch_span_processor.drop_on_queue_full | boolean | false | 如果设置为 `true` 时,则在队列排满时删除 span。否则,强制处理批次。|
+| batch_span_processor.max_queue_size | integer | 1024 | 处理器缓存队列容量的最大值。 |
+| batch_span_processor.batch_timeout | number | 2 | 构造一批 span 超时时间,单位为秒。 |
+| batch_span_processor.max_export_batch_size | integer | 16 | 单个批次中要处理的 span 数量。 |
+| batch_span_processor.inactive_timeout | number | 1 | 两个处理批次之间的时间间隔,单位为秒。 |
+
+:::note
+
+如果你在 error log 中发现由 hex2bytes 函数引发的 `bad argument #1 to '?' (invalid value)` 错误,务必确认你的 traceId 是否满足 opentelemetry 的 [traceId 格式](https://opentelemetry.io/docs/specs/otel/trace/api/#retrieving-the-traceid-and-spanid) 所需的正则规范`[0-9a-f]{32}`。
+
+例如,当插件属性 `trace_id_source` 配置为 `x-request-id` 时,如果请求包含由 Envoy 生成的 x-request-id 请求头,可能会发生上述情况。Envoy 默认使用 [UUID](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/observability/tracing#trace-context-propagation) 生成该请求头。当 opentelemetry 插件将此 UUID 作为 traceId 时,UUID 中的 `-` 可能会引起问题。由于带有 `-` 的 UUID 格式与 traceId 格式不符,因此尝试将跟踪推送到收集器时会导致错误。
+
+:::
你可以参考以下示例进行配置:
diff --git a/t/admin/ssl.t b/t/admin/ssl.t
index b03eb494f854..24a2c9962bb0 100644
--- a/t/admin/ssl.t
+++ b/t/admin/ssl.t
@@ -680,3 +680,123 @@ GET /t
GET /t
--- response_body chomp
passed
+
+
+
+=== TEST 21: set ssl with secret
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local data = {
+ sni = "test.com",
+ cert = "$secret://vault/test/ssl/test.com.crt",
+ key = "$secret://vault/test/ssl/test.com.key",
+ certs = {"$secret://vault/test/ssl/test.com.2.crt"},
+ keys = {"$secret://vault/test/ssl/test.com.2.key"}
+ }
+
+ local code, body = t.test('/apisix/admin/ssls/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "value": {
+ "sni": "test.com",
+ "cert": "$secret://vault/test/ssl/test.com.crt",
+ "key": "$secret://vault/test/ssl/test.com.key",
+ "certs": ["$secret://vault/test/ssl/test.com.2.crt"],
+ "keys": ["$secret://vault/test/ssl/test.com.2.key"]
+ },
+ "key": "/apisix/ssls/1"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 22: set ssl with env, and prefix is all uppercase or lowercase
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local data = {
+ sni = "test.com",
+ cert = "$ENV://APISIX_TEST_SSL_CERT",
+ key = "$env://APISIX_TEST_SSL_KEY",
+ certs = {"$env://APISIX_TEST_SSL_CERTS"},
+ keys = {"$ENV://APISIX_TEST_SSL_KEYS"},
+ }
+
+ local code, body = t.test('/apisix/admin/ssls/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "value": {
+ "sni": "test.com",
+ "cert": "$ENV://APISIX_TEST_SSL_CERT",
+ "key": "$env://APISIX_TEST_SSL_KEY",
+ "certs": ["$env://APISIX_TEST_SSL_CERTS"],
+ "keys": ["$ENV://APISIX_TEST_SSL_KEYS"]
+ },
+ "key": "/apisix/ssls/1"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 23: set ssl with invalid prefix
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local data = {
+ sni = "test.com",
+ cert = "$ENV://APISIX_TEST_SSL_CERT",
+ key = "$env://APISIX_TEST_SSL_KEY",
+ certs = {"https://APISIX_TEST_SSL_CERTS"},
+ keys = {"$ENV://APISIX_TEST_SSL_KEYS"},
+ }
+
+ local code, body = t.test('/apisix/admin/ssls/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "value": {
+ "sni": "test.com"
+ },
+ "key": "/apisix/ssls/1"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.print(body)
+ }
+ }
+--- request
+GET /t
+--- error_code: 400
+--- response_body
+{"error_msg":"invalid configuration: property \"certs\" validation failed: failed to validate item 1: value should match only one schema, but matches none"}
diff --git a/t/admin/ssl4.t b/t/admin/ssl4.t
index 4b69f85383c9..c9de90d9b131 100644
--- a/t/admin/ssl4.t
+++ b/t/admin/ssl4.t
@@ -242,7 +242,6 @@ apisix:
- qeddd145sfvddff3
--- error_log
decrypt ssl key failed
-[alert]
@@ -404,3 +403,108 @@ location /t {
}
--- response_body
passed
+
+
+
+=== TEST 12: set ssl(sni: www.test.com), encrypt with the first keyring
+--- yaml_config
+apisix:
+ node_listen: 1984
+ data_encryption:
+ keyring:
+ - edd1c9f0985e76a1
+--- config
+location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("t/certs/apisix.crt")
+ local ssl_key = t.read_file("t/certs/apisix.key")
+ local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"}
+
+ local code, body = t.test('/apisix/admin/ssls/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "value": {
+ "sni": "test.com"
+ },
+ "key": "/apisix/ssls/1"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+}
+--- response_body
+passed
+
+
+
+=== TEST 13: update encrypt keyring, and set ssl(sni: test2.com)
+--- yaml_config
+apisix:
+ node_listen: 1984
+ data_encryption:
+ keyring:
+ - qeddd145sfvddff3
+ - edd1c9f0985e76a1
+--- config
+location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local ssl_cert = t.read_file("t/certs/test2.crt")
+ local ssl_key = t.read_file("t/certs/test2.key")
+ local data = {cert = ssl_cert, key = ssl_key, sni = "test2.com"}
+
+ local code, body = t.test('/apisix/admin/ssls/2',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "value": {
+ "sni": "test2.com"
+ },
+ "key": "/apisix/ssls/2"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+}
+--- response_body
+passed
+
+
+
+=== TEST 14: Successfully access test.com
+--- yaml_config
+apisix:
+ node_listen: 1984
+ data_encryption:
+ keyring:
+ - qeddd145sfvddff3
+ - edd1c9f0985e76a1
+--- exec
+curl -k -s --resolve "test2.com:1994:127.0.0.1" https://test2.com:1994/hello 2>&1 | cat
+--- response_body
+hello world
+
+
+
+=== TEST 15: Successfully access test2.com
+--- yaml_config
+apisix:
+ node_listen: 1984
+ data_encryption:
+ keyring:
+ - qeddd145sfvddff3
+ - edd1c9f0985e76a1
+--- exec
+curl -k -s --resolve "test2.com:1994:127.0.0.1" https://test2.com:1994/hello 2>&1 | cat
+--- response_body
+hello world
diff --git a/t/cli/test_admin.sh b/t/cli/test_admin.sh
index b11ae3564e79..1298cc1dd406 100755
--- a/t/cli/test_admin.sh
+++ b/t/cli/test_admin.sh
@@ -255,8 +255,7 @@ deployment:
admin:
allow_admin: ~
admin_key:
- -
- name: "admin"
+ - name: "admin"
key: ''
role: admin
' > conf/config.yaml
diff --git a/t/cli/test_main.sh b/t/cli/test_main.sh
index a55787f4be52..9637000176bb 100755
--- a/t/cli/test_main.sh
+++ b/t/cli/test_main.sh
@@ -664,8 +664,8 @@ echo "passed: disable ssl_session_tickets by default"
# support 3rd-party plugin
echo '
apisix:
- extra_lua_path: "\$prefix/example/?.lua"
- extra_lua_cpath: "\$prefix/example/?.lua"
+ extra_lua_path: "$prefix/example/?.lua"
+ extra_lua_cpath: "$prefix/example/?.lua"
plugins:
- 3rd-party
stream_plugins:
@@ -716,7 +716,7 @@ echo "passed: bad lua_module_hook should be rejected"
echo '
apisix:
proxy_mode: http&stream
- extra_lua_path: "\$prefix/example/?.lua"
+ extra_lua_path: "$prefix/example/?.lua"
lua_module_hook: "my_hook"
stream_proxy:
tcp:
@@ -838,7 +838,7 @@ apisix:
disk_path: /tmp/disk_cache_one
disk_size: 100m
memory_size: 20m
- cache_levels: 1:2
+ cache_levels: "1:2"
' > conf/config.yaml
make init
diff --git a/t/cli/test_prometheus_run_in_privileged.sh b/t/cli/test_prometheus_run_in_privileged.sh
index a97cf307e26c..08d0193531ca 100755
--- a/t/cli/test_prometheus_run_in_privileged.sh
+++ b/t/cli/test_prometheus_run_in_privileged.sh
@@ -29,7 +29,7 @@ rm logs/error.log || true
echo '
apisix:
- extra_lua_path: "\$prefix/t/lib/?.lua"
+ extra_lua_path: "$prefix/t/lib/?.lua"
nginx_config:
error_log_level: info
' > conf/config.yaml
@@ -53,10 +53,10 @@ echo "prometheus run in privileged agent successfully when only http is enabled"
sleep 0.5
rm logs/error.log || true
-echo "
+echo '
apisix:
- proxy_mode: http&stream
- extra_lua_path: "\$prefix/t/lib/?.lua"
+ proxy_mode: "http&stream"
+ extra_lua_path: "$prefix/t/lib/?.lua"
enable_admin: true
stream_proxy:
tcp:
@@ -65,7 +65,7 @@ stream_plugins:
- prometheus
nginx_config:
error_log_level: info
-" > conf/config.yaml
+' > conf/config.yaml
make run
sleep 0.1
@@ -86,10 +86,10 @@ make stop
sleep 0.5
rm logs/error.log || true
-echo "
+echo '
apisix:
- proxy_mode: http&stream
- extra_lua_path: "\$prefix/t/lib/?.lua"
+ proxy_mode: "http&stream"
+ extra_lua_path: "$prefix/t/lib/?.lua"
enable_admin: false
stream_proxy:
tcp:
@@ -98,7 +98,7 @@ stream_plugins:
- prometheus
nginx_config:
error_log_level: info
-" > conf/config.yaml
+' > conf/config.yaml
make run
sleep 0.1
diff --git a/t/config-center-yaml/consumer.t b/t/config-center-yaml/consumer.t
index e901b57c5f9c..4fb356185933 100644
--- a/t/config-center-yaml/consumer.t
+++ b/t/config-center-yaml/consumer.t
@@ -34,19 +34,6 @@ deployment:
_EOC_
$block->set_value("yaml_config", $yaml_config);
-
- my $routes = <<_EOC_;
-routes:
- -
- uri: /hello
- upstream:
- nodes:
- "127.0.0.1:1980": 1
- type: roundrobin
-#END
-_EOC_
-
- $block->set_value("apisix_yaml", $block->apisix_yaml . $routes);
});
run_tests();
@@ -57,6 +44,12 @@ __DATA__
--- apisix_yaml
consumers:
- username: jwt-auth
+routes:
+ - uri: /hello
+ upstream:
+ nodes:
+ "127.0.0.1:1980": 1
+ type: roundrobin
#END
--- request
GET /hello
diff --git a/t/config-center-yaml/plugin-configs.t b/t/config-center-yaml/plugin-configs.t
index f10c3651ad45..e199e58825e3 100644
--- a/t/config-center-yaml/plugin-configs.t
+++ b/t/config-center-yaml/plugin-configs.t
@@ -58,7 +58,7 @@ routes:
plugin_config_id: 1
upstream:
nodes:
- "127.0.0.1:1980":1
+ "127.0.0.1:1980": 1
type: roundrobin
#END
--- response_body
@@ -74,7 +74,7 @@ routes:
plugin_config_id: 1
upstream:
nodes:
- "127.0.0.1:1980":1
+ "127.0.0.1:1980": 1
type: roundrobin
#END
--- error_code: 503
@@ -105,7 +105,7 @@ routes:
body: "world\n"
upstream:
nodes:
- "127.0.0.1:1980":1
+ "127.0.0.1:1980": 1
type: roundrobin
#END
--- request
@@ -135,7 +135,7 @@ routes:
plugin_config_id: 1
upstream:
nodes:
- "127.0.0.1:1980":1
+ "127.0.0.1:1980": 1
type: roundrobin
#END
--- error_code: 503
diff --git a/t/config-center-yaml/plugin-metadata.t b/t/config-center-yaml/plugin-metadata.t
index e11461a81cc3..34c6949b881a 100644
--- a/t/config-center-yaml/plugin-metadata.t
+++ b/t/config-center-yaml/plugin-metadata.t
@@ -58,7 +58,7 @@ routes:
plugin_metadata:
- id: http-logger
log_format:
- host: "$host",
+ host: "$host"
remote_addr: "$remote_addr"
#END
--- request
diff --git a/t/config-center-yaml/plugin.t b/t/config-center-yaml/plugin.t
index 36ce69efe393..2ee975d0d990 100644
--- a/t/config-center-yaml/plugin.t
+++ b/t/config-center-yaml/plugin.t
@@ -36,18 +36,19 @@ _EOC_
$block->set_value("yaml_config", $yaml_config);
- my $routes = <<_EOC_;
+ if (!$block->apisix_yaml) {
+ my $routes = <<_EOC_;
routes:
- -
- uri: /hello
+ - uri: /hello
upstream:
- nodes:
- "127.0.0.1:1980": 1
- type: roundrobin
+ nodes:
+ "127.0.0.1:1980": 1
+ type: roundrobin
#END
_EOC_
- $block->set_value("apisix_yaml", $block->apisix_yaml . $routes);
+ $block->set_value("apisix_yaml", $block->extra_apisix_yaml . $routes);
+ }
});
our $debug_config = t::APISIX::read_file("conf/debug.yaml");
@@ -55,10 +56,13 @@ $debug_config =~ s/basic:\n enable: false/basic:\n enable: true/;
run_tests();
+## TODO: extra_apisix_yaml is specific to this document and is not standard behavior for
+## the APISIX testing framework, so it should be standardized or replaced later.
+
__DATA__
=== TEST 1: sanity
---- apisix_yaml
+--- extra_apisix_yaml
plugins:
- name: ip-restriction
- name: jwt-auth
@@ -111,7 +115,7 @@ plugins:
- jwt-auth
stream_plugins:
- mqtt-proxy
---- apisix_yaml
+--- extra_apisix_yaml
plugins:
- name: ip-restriction
- name: jwt-auth
@@ -144,7 +148,7 @@ qr/(loaded plugin and sort by priority: (3000 name: ip-restriction|2510 name: jw
=== TEST 3: disable plugin and its router
---- apisix_yaml
+--- extra_apisix_yaml
plugins:
- name: jwt-auth
--- request
@@ -162,6 +166,7 @@ routes:
plugins:
- name: public-api
- name: prometheus
+#END
--- request
GET /apisix/prometheus/metrics
@@ -181,7 +186,7 @@ plugins:
- jwt-auth
stream_plugins:
- mqtt-proxy
---- apisix_yaml
+--- extra_apisix_yaml
plugins:
- name: xxx
stream: ip-restriction
@@ -197,7 +202,7 @@ load(): plugins not changed
=== TEST 6: empty plugin list
---- apisix_yaml
+--- extra_apisix_yaml
plugins:
stream_plugins:
--- debug_config eval: $::debug_config
diff --git a/t/core/config_etcd.t b/t/core/config_etcd.t
index 39d3cd4d9bbe..7f31fc8592be 100644
--- a/t/core/config_etcd.t
+++ b/t/core/config_etcd.t
@@ -36,7 +36,7 @@ deployment:
etcd:
prefix: "/apisix"
host:
- - "http://127.0.0.1:7777" -- wrong etcd port
+ - "http://127.0.0.1:7777" # wrong etcd port
timeout: 1
--- config
location /t {
@@ -208,10 +208,10 @@ deployment:
config_provider: etcd
etcd:
host:
- - "http://127.0.0.1:1980" -- fake server port
+ - "http://127.0.0.1:1980" # fake server port
timeout: 1
- user: root # root username for etcd
- password: 5tHkHhYkjr6cQY # root password for etcd
+ user: root # root username for etcd
+ password: 5tHkHhYkjr6cQY # root password for etcd
--- extra_init_by_lua
local health_check = require("resty.etcd.health_check")
health_check.get_target_status = function()
diff --git a/t/core/etcd-mtls.t b/t/core/etcd-mtls.t
index 5fa2bfd432c2..3300aae42639 100644
--- a/t/core/etcd-mtls.t
+++ b/t/core/etcd-mtls.t
@@ -159,6 +159,8 @@ deployment:
cert: t/certs/mtls_client.crt
key: t/certs/mtls_client.key
verify: false
+ admin:
+ admin_key_required: false
--- config
location /t {
content_by_lua_block {
diff --git a/t/core/request.t b/t/core/request.t
index c343e2d28fa8..322050830a14 100644
--- a/t/core/request.t
+++ b/t/core/request.t
@@ -463,3 +463,30 @@ test
test
--- error_log
DEPRECATED: use add_header(ctx, header_name, header_value) instead
+
+
+
+=== TEST 16: after setting the header, ctx.var can still access the correct value
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ ngx.ctx.api_ctx = {}
+ local ctx = ngx.ctx.api_ctx
+ core.ctx.set_vars_meta(ctx)
+
+ ctx.var.http_server = "ngx"
+ ngx.say(ctx.var.http_server)
+
+ core.request.set_header(ctx, "server", "test")
+ ngx.say(ctx.var.http_server)
+
+ -- case-insensitive
+ core.request.set_header(ctx, "Server", "apisix")
+ ngx.say(ctx.var.http_server)
+ }
+ }
+--- response_body
+ngx
+test
+apisix
diff --git a/t/kubernetes/discovery/kubernetes2.t b/t/kubernetes/discovery/kubernetes2.t
index 816c797face5..9ec58f50c3ad 100644
--- a/t/kubernetes/discovery/kubernetes2.t
+++ b/t/kubernetes/discovery/kubernetes2.t
@@ -36,8 +36,8 @@ discovery:
token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"
@@ -366,8 +366,8 @@ discovery:
token: ${KUBERNETES_CLIENT_TOKEN}
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token: ${KUBERNETES_CLIENT_TOKEN}
@@ -405,8 +405,8 @@ discovery:
equal: ns-a
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token: ${KUBERNETES_CLIENT_TOKEN}
@@ -443,8 +443,8 @@ discovery:
not_equal: ns-a
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token: ${KUBERNETES_CLIENT_TOKEN}
@@ -481,8 +481,8 @@ discovery:
match: [ns-a,ns-b]
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token: ${KUBERNETES_CLIENT_TOKEN}
@@ -519,8 +519,8 @@ discovery:
match: ["ns-[ab]"]
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token: ${KUBERNETES_CLIENT_TOKEN}
@@ -557,8 +557,8 @@ discovery:
not_match: ["ns-a"]
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token: ${KUBERNETES_CLIENT_TOKEN}
@@ -595,8 +595,8 @@ discovery:
not_match: ["ns-[ab]"]
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token: ${KUBERNETES_CLIENT_TOKEN}
@@ -633,8 +633,8 @@ discovery:
first=1,second
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token: ${KUBERNETES_CLIENT_TOKEN}
diff --git a/t/kubernetes/discovery/kubernetes3.t b/t/kubernetes/discovery/kubernetes3.t
index aa90151c1ce6..e2242e9a8c13 100644
--- a/t/kubernetes/discovery/kubernetes3.t
+++ b/t/kubernetes/discovery/kubernetes3.t
@@ -37,8 +37,8 @@ discovery:
watch_endpoint_slices: true
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"
@@ -401,8 +401,8 @@ discovery:
watch_endpoint_slices: true
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token: ${KUBERNETES_CLIENT_TOKEN}
diff --git a/t/kubernetes/discovery/stream/kubernetes.t b/t/kubernetes/discovery/stream/kubernetes.t
index 5d9e06c86291..a9058f55dfe1 100644
--- a/t/kubernetes/discovery/stream/kubernetes.t
+++ b/t/kubernetes/discovery/stream/kubernetes.t
@@ -36,8 +36,8 @@ discovery:
token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"
- id: second
service:
- schema: "http",
- host: "127.0.0.1",
+ schema: "http"
+ host: "127.0.0.1"
port: "6445"
client:
token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"
diff --git a/t/node/grpc-proxy-mtls.t b/t/node/grpc-proxy-mtls.t
index b4d31b9d6698..bb5efcc5222d 100644
--- a/t/node/grpc-proxy-mtls.t
+++ b/t/node/grpc-proxy-mtls.t
@@ -56,7 +56,7 @@ routes:
upstream:
scheme: grpcs
tls:
- client_cert: "-----BEGIN CERTIFICATE-----\nMIIDUzCCAjugAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE8wDQYJKoZIhvcNAQEN\nBQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG\nWmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN\nMjIxMjAxMTAxOTU3WhcNNDIwODE4MTAxOTU3WjBOMQswCQYDVQQGEwJjbjESMBAG\nA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGjAYBgNVBAMMEWNsaWVu\ndC5hcGlzaXguZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzypq\nkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5oIHkQLfeaaLcd4ycFcZw\nFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6WxcOza4VmfcrKqj27oodr\noqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv+e6HaAuw8MvcsEo+MQwu\ncTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E0s+uYKzN0Cyef2C6VtBJ\nKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT/FpZSXm4iSy0a5qTYhkF\nrFdV1YuYYZL5YGl9aQIDAQABox8wHTAbBgNVHREEFDASghBhZG1pbi5hcGlzaXgu\nZGV2MA0GCSqGSIb3DQEBDQUAA4IBAQBepRpwWdckZ6QdL5EuufYwU7p5SIqkVL/+\nN4/l5YSjPoAZf/M6XkZu/PsLI9/kPZN/PX4oxjZSDH14dU9ON3JjxtSrebizcT8V\naQ13TeW9KSv/i5oT6qBmj+V+RF2YCUhyzXdYokOfsSVtSlA1qMdm+cv0vkjYcImV\nl3L9nVHRPq15dY9sbmWEtFBWvOzqNSuQYax+iYG+XEuL9SPaYlwKRC6eS/dbXa1T\nPPWDQad2X/WmhxPzEHvjSl2bsZF1u0GEdKyhXWMOLCLiYIJo15G7bMz8cTUvkDN3\n6WaWBd6bd2g13Ho/OOceARpkR/ND8PU78Y8cq+zHoOSqH+1aly5H\n-----END CERTIFICATE-----\n",
+ client_cert: "-----BEGIN CERTIFICATE-----\nMIIDUzCCAjugAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE8wDQYJKoZIhvcNAQEN\nBQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG\nWmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN\nMjIxMjAxMTAxOTU3WhcNNDIwODE4MTAxOTU3WjBOMQswCQYDVQQGEwJjbjESMBAG\nA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGjAYBgNVBAMMEWNsaWVu\ndC5hcGlzaXguZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzypq\nkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5oIHkQLfeaaLcd4ycFcZw\nFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6WxcOza4VmfcrKqj27oodr\noqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv+e6HaAuw8MvcsEo+MQwu\ncTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E0s+uYKzN0Cyef2C6VtBJ\nKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT/FpZSXm4iSy0a5qTYhkF\nrFdV1YuYYZL5YGl9aQIDAQABox8wHTAbBgNVHREEFDASghBhZG1pbi5hcGlzaXgu\nZGV2MA0GCSqGSIb3DQEBDQUAA4IBAQBepRpwWdckZ6QdL5EuufYwU7p5SIqkVL/+\nN4/l5YSjPoAZf/M6XkZu/PsLI9/kPZN/PX4oxjZSDH14dU9ON3JjxtSrebizcT8V\naQ13TeW9KSv/i5oT6qBmj+V+RF2YCUhyzXdYokOfsSVtSlA1qMdm+cv0vkjYcImV\nl3L9nVHRPq15dY9sbmWEtFBWvOzqNSuQYax+iYG+XEuL9SPaYlwKRC6eS/dbXa1T\nPPWDQad2X/WmhxPzEHvjSl2bsZF1u0GEdKyhXWMOLCLiYIJo15G7bMz8cTUvkDN3\n6WaWBd6bd2g13Ho/OOceARpkR/ND8PU78Y8cq+zHoOSqH+1aly5H\n-----END CERTIFICATE-----\n"
client_key: "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAzypqkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5\noIHkQLfeaaLcd4ycFcZwFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6W\nxcOza4VmfcrKqj27oodroqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv\n+e6HaAuw8MvcsEo+MQwucTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E\n0s+uYKzN0Cyef2C6VtBJKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT\n/FpZSXm4iSy0a5qTYhkFrFdV1YuYYZL5YGl9aQIDAQABAoIBAD7tUG//lnZnsj/4\nJXONaORaFj5ROrOpFPuRemS+egzqFCuuaXpC2lV6RHnr+XHq6SKII1WfagTb+lt/\nvs760jfmGQSxf1mAUidtqcP+sKc/Pr1mgi/SUTawz8AYEFWD6PHmlqBSLTYml+La\nckd+0pGtk49wEnYSb9n+cv640hra9AYpm9LXUFaypiFEu+xJhtyKKWkmiVGrt/X9\n3aG6MuYeZplW8Xq1L6jcHsieTOB3T+UBfG3O0bELBgTVexOQYI9O4Ejl9/n5/8WP\nAbIw7PaAYc7fBkwOGh7/qYUdHnrm5o9MiRT6dPxrVSf0PZVACmA+JoNjCPv0Typf\n3MMkHoECgYEA9+3LYzdP8j9iv1fP5hn5K6XZAobCD1mnzv3my0KmoSMC26XuS71f\nvyBhjL7zMxGEComvVTF9SaNMfMYTU4CwOJQxLAuT69PEzW6oVEeBoscE5hwhjj6o\n/lr5jMbt807J9HnldSpwllfj7JeiTuqRcCu/cwqKQQ1aB3YBZ7h5pZkCgYEA1ejo\nKrR1hN2FMhp4pj0nZ5+Ry2lyIVbN4kIcoteaPhyQ0AQ0zNoi27EBRnleRwVDYECi\nXAFrgJU+laKsg1iPjvinHibrB9G2p1uv3BEh6lPl9wPFlENTOjPkqjR6eVVZGP8e\nVzxYxIo2x/QLDUeOpxySdG4pdhEHGfvmdGmr2FECgYBeknedzhCR4HnjcTSdmlTA\nwI+p9gt6XYG0ZIewCymSl89UR9RBUeh++HQdgw0z8r+CYYjfH3SiLUdU5R2kIZeW\nzXiAS55OO8Z7cnWFSI17sRz+RcbLAr3l4IAGoi9MO0awGftcGSc/QiFwM1s3bSSz\nPAzYbjHUpKot5Gae0PCeKQKBgQCHfkfRBQ2LY2WDHxFc+0+Ca6jF17zbMUioEIhi\n/X5N6XowyPlI6MM7tRrBsQ7unX7X8Rjmfl/ByschsTDk4avNO+NfTfeBtGymBYWX\nN6Lr8sivdkwoZZzKOSSWSzdos48ELlThnO/9Ti706Lg3aSQK5iY+aakJiC+fXdfT\n1TtsgQKBgQDRYvtK/Cpaq0W6wO3I4R75lHGa7zjEr4HA0Kk/FlwS0YveuTh5xqBj\nwQz2YyuQQfJfJs7kbWOITBT3vuBJ8F+pktL2Xq5p7/ooIXOGS8Ib4/JAS1C/wb+t\nuJHGva12bZ4uizxdL2Q0/n9ziYTiMc/MMh/56o4Je8RMdOMT5lTsRQ==\n-----END RSA PRIVATE KEY-----\n"
nodes:
"127.0.0.1:10053": 1
@@ -85,7 +85,7 @@ routes:
upstream:
scheme: grpcs
tls:
- client_cert: "-----BEGIN CERTIFICATE-----\nMIIDUzCCAjugAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE8wDQYJKoZIhvcNAQEN\nBQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG\nWmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN\nMjIxMjAxMTAxOTU3WhcNNDIwODE4MTAxOTU3WjBOMQswCQYDVQQGEwJjbjESMBAG\nA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGjAYBgNVBAMMEWNsaWVu\ndC5hcGlzaXguZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzypq\nkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5oIHkQLfeaaLcd4ycFcZw\nFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6WxcOza4VmfcrKqj27oodr\noqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv+e6HaAuw8MvcsEo+MQwu\ncTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E0s+uYKzN0Cyef2C6VtBJ\nKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT/FpZSXm4iSy0a5qTYhkF\nrFdV1YuYYZL5YGl9aQIDAQABox8wHTAbBgNVHREEFDASghBhZG1pbi5hcGlzaXgu\nZGV2MA0GCSqGSIb3DQEBDQUAA4IBAQBepRpwWdckZ6QdL5EuufYwU7p5SIqkVL/+\nN4/l5YSjPoAZf/M6XkZu/PsLI9/kPZN/PX4oxjZSDH14dU9ON3JjxtSrebizcT8V\naQ13TeW9KSv/i5oT6qBmj+V+RF2YCUhyzXdYokOfsSVtSlA1qMdm+cv0vkjYcImV\nl3L9nVHRPq15dY9sbmWEtFBWvOzqNSuQYax+iYG+XEuL9SPaYlwKRC6eS/dbXa1T\nPPWDQad2X/WmhxPzEHvjSl2bsZF1u0GEdKyhXWMOLCLiYIJo15G7bMz8cTUvkDN3\n6WaWBd6bd2g13Ho/OOceARpkR/ND8PU78Y8cq+zHoOSqH+1aly5H\n-----END CERTIFICATE-----\n",
+ client_cert: "-----BEGIN CERTIFICATE-----\nMIIDUzCCAjugAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE8wDQYJKoZIhvcNAQEN\nBQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG\nWmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN\nMjIxMjAxMTAxOTU3WhcNNDIwODE4MTAxOTU3WjBOMQswCQYDVQQGEwJjbjESMBAG\nA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGjAYBgNVBAMMEWNsaWVu\ndC5hcGlzaXguZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzypq\nkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5oIHkQLfeaaLcd4ycFcZw\nFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6WxcOza4VmfcrKqj27oodr\noqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv+e6HaAuw8MvcsEo+MQwu\ncTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E0s+uYKzN0Cyef2C6VtBJ\nKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT/FpZSXm4iSy0a5qTYhkF\nrFdV1YuYYZL5YGl9aQIDAQABox8wHTAbBgNVHREEFDASghBhZG1pbi5hcGlzaXgu\nZGV2MA0GCSqGSIb3DQEBDQUAA4IBAQBepRpwWdckZ6QdL5EuufYwU7p5SIqkVL/+\nN4/l5YSjPoAZf/M6XkZu/PsLI9/kPZN/PX4oxjZSDH14dU9ON3JjxtSrebizcT8V\naQ13TeW9KSv/i5oT6qBmj+V+RF2YCUhyzXdYokOfsSVtSlA1qMdm+cv0vkjYcImV\nl3L9nVHRPq15dY9sbmWEtFBWvOzqNSuQYax+iYG+XEuL9SPaYlwKRC6eS/dbXa1T\nPPWDQad2X/WmhxPzEHvjSl2bsZF1u0GEdKyhXWMOLCLiYIJo15G7bMz8cTUvkDN3\n6WaWBd6bd2g13Ho/OOceARpkR/ND8PU78Y8cq+zHoOSqH+1aly5H\n-----END CERTIFICATE-----\n"
client_key: "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAzypqkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5\noIHkQLfeaaLcd4ycFcZwFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6W\nxcOza4VmfcrKqj27oodroqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv\n+e6HaAuw8MvcsEo+MQwucTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E\n0s+uYKzN0Cyef2C6VtBJKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT\n/FpZSXm4iSy0a5qTYhkFrFdV1YuYYZL5YGl9aQIDAQABAoIBAD7tUG//lnZnsj/4\nJXONaORaFj5ROrOpFPuRemS+egzqFCuuaXpC2lV6RHnr+XHq6SKII1WfagTb+lt/\nvs760jfmGQSxf1mAUidtqcP+sKc/Pr1mgi/SUTawz8AYEFWD6PHmlqBSLTYml+La\nckd+0pGtk49wEnYSb9n+cv640hra9AYpm9LXUFaypiFEu+xJhtyKKWkmiVGrt/X9\n3aG6MuYeZplW8Xq1L6jcHsieTOB3T+UBfG3O0bELBgTVexOQYI9O4Ejl9/n5/8WP\nAbIw7PaAYc7fBkwOGh7/qYUdHnrm5o9MiRT6dPxrVSf0PZVACmA+JoNjCPv0Typf\n3MMkHoECgYEA9+3LYzdP8j9iv1fP5hn5K6XZAobCD1mnzv3my0KmoSMC26XuS71f\nvyBhjL7zMxGEComvVTF9SaNMfMYTU4CwOJQxLAuT69PEzW6oVEeBoscE5hwhjj6o\n/lr5jMbt807J9HnldSpwllfj7JeiTuqRcCu/cwqKQQ1aB3YBZ7h5pZkCgYEA1ejo\nKrR1hN2FMhp4pj0nZ5+Ry2lyIVbN4kIcoteaPhyQ0AQ0zNoi27EBRnleRwVDYECi\nXAFrgJU+laKsg1iPjvinHibrB9G2p1uv3BEh6lPl9wPFlENTOjPkqjR6eVVZGP8e\nVzxYxIo2x/QLDUeOpxySdG4pdhEHGfvmdGmr2FECgYBeknedzhCR4HnjcTSdmlTA\nwI+p9gt6XYG0ZIewCymSl89UR9RBUeh++HQdgw0z8r+CYYjfH3SiLUdU5R2kIZeW\nzXiAS55OO8Z7cnWFSI17sRz+RcbLAr3l4IAGoi9MO0awGftcGSc/QiFwM1s3bSSz\nPAzYbjHUpKot5Gae0PCeKQKBgQCHfkfRBQ2LY2WDHxFc+0+Ca6jF17zbMUioEIhi\n/X5N6XowyPlI6MM7tRrBsQ7unX7X8Rjmfl/ByschsTDk4avNO+NfTfeBtGymBYWX\nN6Lr8sivdkwoZZzKOSSWSzdos48ELlThnO/9Ti706Lg3aSQK5iY+aakJiC+fXdfT\n1TtsgQKBgQDRYvtK/Cpaq0W6wO3I4R75lHGa7zjEr4HA0Kk/FlwS0YveuTh5xqBj\nwQz2YyuQQfJfJs7kbWOITBT3vuBJ8F+pktL2Xq5p7/ooIXOGS8Ib4/JAS1C/wb+t\nuJHGva12bZ4uizxdL2Q0/n9ziYTiMc/MMh/56o4Je8RMdOMT5lTsRQ==\n-----END RSA PRIVATE KEY-----\n"
nodes:
"127.0.0.1:10053": 1
diff --git a/t/node/healthcheck2.t b/t/node/healthcheck2.t
index e52cf13a052f..d63e80ebde4b 100644
--- a/t/node/healthcheck2.t
+++ b/t/node/healthcheck2.t
@@ -91,8 +91,7 @@ services:
interval: 1
http_failures: 2
routes:
- -
- service_id: 1
+ - service_id: 1
uri: /server_port
#END
--- config
@@ -166,8 +165,7 @@ services:
interval: 1
http_failures: 2
routes:
- -
- service_id: 1
+ - service_id: 1
uri: /server_port
upstream:
type: roundrobin
diff --git a/t/node/https-proxy.t b/t/node/https-proxy.t
index 56a238fb2988..efe209051570 100644
--- a/t/node/https-proxy.t
+++ b/t/node/https-proxy.t
@@ -122,8 +122,8 @@ routes:
nodes:
"127.0.0.1:1983": 1
type: roundrobin
- pass_host: "rewrite",
- upstream_host: "www.test.com",
+ pass_host: "rewrite"
+ upstream_host: "www.test.com"
#END
--- request
GET /uri
@@ -149,7 +149,7 @@ routes:
nodes:
"localhost:1983": 1
type: roundrobin
- pass_host: "node",
+ pass_host: "node"
#END
--- request
GET /uri
diff --git a/t/node/least_conn.t b/t/node/least_conn.t
index 6cd36db1927d..174252fd713d 100644
--- a/t/node/least_conn.t
+++ b/t/node/least_conn.t
@@ -40,10 +40,9 @@ _EOC_
my $route = <<_EOC_;
routes:
- -
- upstream_id: 1
+ - upstream_id: 1
uris:
- - /mysleep
+ - /mysleep
#END
_EOC_
@@ -61,8 +60,7 @@ __DATA__
=== TEST 1: select highest weight
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: least_conn
nodes:
"127.0.0.1:1980": 2
@@ -77,8 +75,7 @@ proxy request to 127.0.0.1:1980 while connecting to upstream
=== TEST 2: select least conn
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: least_conn
nodes:
"127.0.0.1:1980": 3
@@ -121,8 +118,7 @@ proxy request to 127.0.0.1:1980 while connecting to upstream
=== TEST 3: retry
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: least_conn
nodes:
"127.0.0.1:1999": 2
@@ -140,8 +136,7 @@ proxy request to 127.0.0.1:1980 while connecting to upstream
=== TEST 4: retry all nodes, failed
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: least_conn
nodes:
"127.0.0.1:1999": 2
diff --git a/t/node/priority-balancer/health-checker.t b/t/node/priority-balancer/health-checker.t
index 7ad685ac86bb..cd970c667d60 100644
--- a/t/node/priority-balancer/health-checker.t
+++ b/t/node/priority-balancer/health-checker.t
@@ -42,10 +42,9 @@ _EOC_
my $route = <<_EOC_;
routes:
- -
- upstream_id: 1
+ - upstream_id: 1
uris:
- - /hello
+ - /hello
#END
_EOC_
@@ -64,8 +63,7 @@ __DATA__
=== TEST 1: all are down detected by health checker
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: least_conn
nodes:
- host: 127.0.0.1
diff --git a/t/node/priority-balancer/sanity.t b/t/node/priority-balancer/sanity.t
index 9f0688ba5e19..11acc7f32554 100644
--- a/t/node/priority-balancer/sanity.t
+++ b/t/node/priority-balancer/sanity.t
@@ -42,8 +42,7 @@ _EOC_
my $route = <<_EOC_;
routes:
- -
- upstream_id: 1
+ - upstream_id: 1
uris:
- /hello
- /mysleep
@@ -65,8 +64,7 @@ __DATA__
=== TEST 1: sanity
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: least_conn
nodes:
- host: 127.0.0.1
@@ -109,8 +107,7 @@ proxy request to 127.0.0.1:1980
=== TEST 2: all failed
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: least_conn
nodes:
- host: 127.0.0.1
@@ -140,8 +137,7 @@ proxy request to 127.0.0.1:1979
=== TEST 3: default priority is zero
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: least_conn
nodes:
- host: 127.0.0.1
@@ -171,8 +167,7 @@ proxy request to 127.0.0.1:1980
=== TEST 4: least_conn
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: least_conn
nodes:
- host: 127.0.0.1
@@ -229,8 +224,7 @@ proxy request to 127.0.0.1:1980 while connecting to upstream
=== TEST 5: roundrobin
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: roundrobin
nodes:
- host: 127.0.0.1
@@ -265,8 +259,7 @@ proxy request to 127.0.0.4:1979
=== TEST 6: ewma
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: ewma
key: remote_addr
nodes:
@@ -297,8 +290,7 @@ proxy request to 127.0.0.3:1979
=== TEST 7: chash
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
type: chash
key: remote_addr
nodes:
diff --git a/t/node/ssl.t b/t/node/ssl.t
new file mode 100644
index 000000000000..33347366e975
--- /dev/null
+++ b/t/node/ssl.t
@@ -0,0 +1,243 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+BEGIN {
+ sub set_env_from_file {
+ my ($env_name, $file_path) = @_;
+
+ open my $fh, '<', $file_path or die $!;
+ my $content = do { local $/; <$fh> };
+ close $fh;
+
+ $ENV{$env_name} = $content;
+ }
+    # Load test certificates/keys into environment variables before APISIX starts
+ set_env_from_file('TEST_CERT', 't/certs/apisix.crt');
+ set_env_from_file('TEST_KEY', 't/certs/apisix.key');
+ set_env_from_file('TEST2_CERT', 't/certs/test2.crt');
+ set_env_from_file('TEST2_KEY', 't/certs/test2.key');
+}
+
+use t::APISIX 'no_plan';
+
+log_level('info');
+no_root_location();
+
+sub set_env_from_file {
+ my ($env_name, $file_path) = @_;
+
+ open my $fh, '<', $file_path or die $!;
+ my $content = do { local $/; <$fh> };
+ close $fh;
+
+ $ENV{$env_name} = $content;
+}
+
+
+add_block_preprocessor(sub {
+ my ($block) = @_;
+
+ if (!$block->request) {
+ $block->set_value("request", "GET /t");
+ }
+
+});
+
+run_tests;
+
+__DATA__
+
+=== TEST 1: store two certs and keys in vault
+--- exec
+VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/ssl \
+ test.com.crt=@t/certs/apisix.crt \
+ test.com.key=@t/certs/apisix.key \
+ test.com.2.crt=@t/certs/test2.crt \
+ test.com.2.key=@t/certs/test2.key
+--- response_body
+Success! Data written to: kv/apisix/ssl
+
+
+
+=== TEST 2: set secret
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/secrets/vault/test',
+ ngx.HTTP_PUT,
+ [[{
+ "uri": "http://0.0.0.0:8200",
+ "prefix": "kv/apisix",
+ "token": "root"
+ }]],
+ [[{
+ "key": "/apisix/secrets/vault/test",
+ "value": {
+ "uri": "http://0.0.0.0:8200",
+ "prefix": "kv/apisix",
+ "token": "root"
+ }
+ }]]
+ )
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: set ssl with two certs and keys in vault
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local data = {
+ snis = {"test.com"},
+ key = "$secret://vault/test/ssl/test.com.key",
+ cert = "$secret://vault/test/ssl/test.com.crt",
+ keys = {"$secret://vault/test/ssl/test.com.2.key"},
+ certs = {"$secret://vault/test/ssl/test.com.2.crt"}
+ }
+
+ local code, body = t.test('/apisix/admin/ssls/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "value": {
+ "snis": ["test.com"],
+ "key": "$secret://vault/test/ssl/test.com.key",
+ "cert": "$secret://vault/test/ssl/test.com.crt",
+ "keys": ["$secret://vault/test/ssl/test.com.2.key"],
+ "certs": ["$secret://vault/test/ssl/test.com.2.crt"]
+ },
+ "key": "/apisix/ssls/1"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 4: set route
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/2',
+ ngx.HTTP_PUT,
+ [[{
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: access to https with test.com
+--- exec
+curl -s -k https://test.com:1994/hello
+--- response_body
+hello world
+--- error_log
+fetching data from secret uri
+fetching data from secret uri
+fetching data from secret uri
+fetching data from secret uri
+
+
+
+=== TEST 6: set ssl with two certs and keys in env
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin")
+
+ local data = {
+ snis = {"test.com"},
+ key = "$env://TEST_KEY",
+ cert = "$env://TEST_CERT",
+ keys = {"$env://TEST2_KEY"},
+ certs = {"$env://TEST2_CERT"}
+ }
+
+ local code, body = t.test('/apisix/admin/ssls/1',
+ ngx.HTTP_PUT,
+ core.json.encode(data),
+ [[{
+ "value": {
+ "snis": ["test.com"],
+ "key": "$env://TEST_KEY",
+ "cert": "$env://TEST_CERT",
+ "keys": ["$env://TEST2_KEY"],
+ "certs": ["$env://TEST2_CERT"]
+ },
+ "key": "/apisix/ssls/1"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: access to https with test.com
+--- exec
+curl -s -k https://test.com:1994/hello
+--- response_body
+hello world
+--- error_log
+fetching data from env uri
+fetching data from env uri
+fetching data from env uri
+fetching data from env uri
diff --git a/t/node/upstream-discovery.t b/t/node/upstream-discovery.t
index 941e89a3c069..62b240235b34 100644
--- a/t/node/upstream-discovery.t
+++ b/t/node/upstream-discovery.t
@@ -153,13 +153,11 @@ create_obj_fun(): upstream nodes:
=== TEST 3: create new server picker when nodes change, up_conf doesn't come from upstream
--- apisix_yaml
routes:
- -
- uris:
+ - uris:
- /hello
service_id: 1
services:
- -
- id: 1
+ - id: 1
upstream:
service_name: mock
discovery_type: mock
diff --git a/t/node/upstream-domain-with-special-dns.t b/t/node/upstream-domain-with-special-dns.t
index b064838cde28..650c87d10f0b 100644
--- a/t/node/upstream-domain-with-special-dns.t
+++ b/t/node/upstream-domain-with-special-dns.t
@@ -58,8 +58,7 @@ __DATA__
--- listen_ipv6
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
nodes:
ipv6.test.local:1980: 1
type: roundrobin
@@ -74,8 +73,7 @@ hello world
--- log_level: debug
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
nodes:
ttl.test.local:1980: 1
type: roundrobin
@@ -117,8 +115,7 @@ deployment:
config_provider: yaml
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
nodes:
ttl.test.local:1980: 1
type: roundrobin
@@ -152,8 +149,7 @@ connect to 127.0.0.1:1053
--- log_level: debug
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
nodes:
ttl.1s.test.local:1980: 1
type: roundrobin
@@ -200,8 +196,7 @@ deployment:
config_provider: yaml
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
nodes:
ttl.test.local:1980: 1
type: roundrobin
diff --git a/t/node/upstream-domain-with-special-ipv6-dns.t b/t/node/upstream-domain-with-special-ipv6-dns.t
index 9c5e67a48ed1..dd90aec7d0d1 100644
--- a/t/node/upstream-domain-with-special-ipv6-dns.t
+++ b/t/node/upstream-domain-with-special-ipv6-dns.t
@@ -58,8 +58,7 @@ __DATA__
--- listen_ipv6
--- apisix_yaml
upstreams:
- -
- id: 1
+ - id: 1
nodes:
ipv6.test.local:1980: 1
type: roundrobin
diff --git a/t/plugin/body-transformer.t b/t/plugin/body-transformer.t
index 929ed1aac7c2..b6a266c47a82 100644
--- a/t/plugin/body-transformer.t
+++ b/t/plugin/body-transformer.t
@@ -473,8 +473,8 @@ qr/attempt to call global 'name' \(a string value\)/
local core = require("apisix.core")
local req_template = [[
{%
- local yaml = require("tinyyaml")
- local body = yaml.parse(_body)
+ local yaml = require("lyaml")
+ local body = yaml.load(_body)
%}
{"foobar":"{{body.foobar.foo .. " " .. body.foobar.bar}}"}
]]
diff --git a/t/plugin/cors.t b/t/plugin/cors.t
index 924a8b1fad6a..79e32513d98e 100644
--- a/t/plugin/cors.t
+++ b/t/plugin/cors.t
@@ -454,7 +454,7 @@ ExternalHeader1: val
ExternalHeader2: val
ExternalHeader3: val
--- response_body
-{"message":"Missing API key found in request"}
+{"message":"Missing API key in request"}
--- error_code: 401
--- response_headers
Access-Control-Allow-Origin: https://sub.domain.com
diff --git a/t/plugin/datadog.t b/t/plugin/datadog.t
index 8dade74a6a72..506abcc0fe41 100644
--- a/t/plugin/datadog.t
+++ b/t/plugin/datadog.t
@@ -498,3 +498,40 @@ message received: apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,new_tag:must
message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:1,service_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http
message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:1,service_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http
/
+
+
+
+=== TEST 10: testing behaviour with consumer
+--- apisix_yaml
+consumers:
+ - username: user0
+ plugins:
+ key-auth:
+ key: user0
+routes:
+ - uri: /opentracing
+ name: datadog
+ upstream:
+ nodes:
+ "127.0.0.1:1982": 1
+ plugins:
+ datadog:
+ batch_max_size: 1
+ max_retry_count: 0
+ key-auth: {}
+#END
+--- request
+GET /opentracing?apikey=user0
+--- response_body
+opentracing
+--- wait: 0.5
+--- grep_error_log eval
+qr/message received: apisix(.+?(?=, ))/
+--- grep_error_log_out eval
+qr/message received: apisix\.request\.counter:1\|c\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http
+message received: apisix\.request\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http
+message received: apisix\.upstream\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http
+message received: apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http
+message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http
+message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http
+/
diff --git a/t/plugin/dubbo-proxy/route.t b/t/plugin/dubbo-proxy/route.t
index 83a181d17215..d21b0629e0d3 100644
--- a/t/plugin/dubbo-proxy/route.t
+++ b/t/plugin/dubbo-proxy/route.t
@@ -39,6 +39,7 @@ plugins:
- dubbo-proxy
- response-rewrite
- proxy-rewrite
+ - key-auth
_EOC_
$block->set_value("extra_yaml_config", $extra_yaml_config);
@@ -161,9 +162,6 @@ dubbo success
apisix:
node_listen: 1984
enable_admin: true
-plugins:
- - key-auth
- - dubbo-proxy
--- config
location /t {
content_by_lua_block {
@@ -228,9 +226,6 @@ passed
apisix:
node_listen: 1984
enable_admin: true
-plugins:
- - key-auth
- - dubbo-proxy
--- error_code: 401
@@ -240,9 +235,6 @@ plugins:
apisix:
node_listen: 1984
enable_admin: true
-plugins:
- - key-auth
- - dubbo-proxy
--- more_headers
apikey: jack
--- response_body
diff --git a/t/plugin/grpc-transcode3.t b/t/plugin/grpc-transcode3.t
index bd4164d3b5ae..0a8ddf54ded6 100644
--- a/t/plugin/grpc-transcode3.t
+++ b/t/plugin/grpc-transcode3.t
@@ -525,3 +525,54 @@ location /t {
end
}
}
+
+
+
+=== TEST 13: bugfix - filter out illegal INT(string) formats
+--- config
+location /t {
+ content_by_lua_block {
+ local pcall = pcall
+ local require = require
+ local protoc = require("protoc")
+ local pb = require("pb")
+ local pb_encode = pb.encode
+
+ assert(protoc:load [[
+ syntax = "proto3";
+ message IntStringPattern {
+ int64 value = 1;
+ }]])
+
+ local patterns
+ do
+ local function G(pattern)
+ return {pattern, true}
+ end
+
+ local function B(pattern)
+ return {pattern, [[bad argument #2 to '?' (number/'#number' expected for field 'value', got string)]]}
+ end
+
+ patterns = {
+ G(1), G(2), G(-3), G("#123"), G("0xabF"), G("#-0x123abcdef"), G("-#0x123abcdef"), G("#0x123abcdef"), G("123"),
+ B("#a"), B("+aaa"), B("#aaaa"), B("#-aa"),
+ }
+ end
+
+ for _, p in pairs(patterns) do
+ local pattern = {
+ value = p[1],
+ }
+ local status, err = pcall(pb_encode, "IntStringPattern", pattern)
+ local res = status
+ if not res then
+ res = err
+ end
+ assert(res == p[2])
+ end
+ ngx.say("passed")
+ }
+}
+--- response_body
+passed
diff --git a/t/plugin/jwt-auth4.t b/t/plugin/jwt-auth4.t
index f91b233f4c4f..4eddaf92f021 100644
--- a/t/plugin/jwt-auth4.t
+++ b/t/plugin/jwt-auth4.t
@@ -120,3 +120,100 @@ __DATA__
}
--- response_body
safe-jws
+
+
+
+=== TEST 2: enable jwt auth plugin (with custom key_claim_name) using admin api
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/4',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "jwt-auth": {
+ "key": "custom-user-key",
+ "secret": "custom-secret-key",
+ "key_claim_name": "iss"
+ }
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ ngx.say(body)
+ return
+ end
+
+ ngx.say(body)
+ }
+ }
+--- response_body
+passed
+
+
+
+=== TEST 3: verify that key_claim_name can be used to validate the Consumer JWT
+with a different claim than 'key'
+--- config
+ location /t {
+ content_by_lua_block {
+ local core = require("apisix.core")
+ local t = require("lib.test_admin").test
+
+            -- prepare consumer; the custom claim name is supplied at sign/verify time, not here
+ local csm_code, csm_body = t('/apisix/admin/consumers',
+ ngx.HTTP_PUT,
+ [[{
+ "username": "mike",
+ "plugins": {
+ "jwt-auth": {
+ "key": "custom-user-key",
+ "secret": "custom-secret-key"
+ }
+ }
+ }]]
+ )
+
+ if csm_code >= 300 then
+ ngx.status = csm_code
+ ngx.say(csm_body)
+ return
+ end
+
+ -- generate JWT with custom key ("key_claim_name" = "iss")
+ local sign_code, sign_body, token = t('/apisix/plugin/jwt/sign?key=custom-user-key&key_claim_name=iss',
+ ngx.HTTP_GET
+ )
+
+ if sign_code > 200 then
+ ngx.status = sign_code
+ ngx.say(sign_body)
+ return
+ end
+
+ -- verify JWT using the custom key_claim_name
+ local ver_code, ver_body = t('/hello?jwt=' .. token,
+ ngx.HTTP_GET
+ )
+
+ if ver_code > 200 then
+ ngx.status = ver_code
+ ngx.say(ver_body)
+ return
+ end
+
+ ngx.say("verified-jwt")
+ }
+ }
+--- response_body
+verified-jwt
diff --git a/t/plugin/key-auth.t b/t/plugin/key-auth.t
index f68fe3088f4d..5c28e6bada99 100644
--- a/t/plugin/key-auth.t
+++ b/t/plugin/key-auth.t
@@ -173,7 +173,7 @@ apikey: 123
GET /hello
--- error_code: 401
--- response_body
-{"message":"Missing API key found in request"}
+{"message":"Missing API key in request"}
diff --git a/t/plugin/log-rotate2.t b/t/plugin/log-rotate2.t
index 76651dd7415f..636ad2853a06 100644
--- a/t/plugin/log-rotate2.t
+++ b/t/plugin/log-rotate2.t
@@ -25,7 +25,7 @@ no_root_location();
add_block_preprocessor(sub {
my ($block) = @_;
- if (! $block->extra_yaml_config) {
+ if (!defined $block->extra_yaml_config) {
my $extra_yaml_config = <<_EOC_;
plugins:
- log-rotate
@@ -136,10 +136,7 @@ passed
=== TEST 4: test rotate time align
---- yaml_config
-apisix:
- node_listen: 1984
- admin_key: ~
+--- extra_yaml_config
plugins:
- log-rotate
plugin_attr:
diff --git a/t/plugin/opentelemetry3.t b/t/plugin/opentelemetry3.t
index 6171d12f6276..1398fe89186e 100644
--- a/t/plugin/opentelemetry3.t
+++ b/t/plugin/opentelemetry3.t
@@ -158,7 +158,10 @@ qr/request log: \{.*"opentelemetry_context_traceparent":"00-\w{32}-\w{16}-01".*\
=== TEST 3: trigger opentelemetry with disable set variables
---- yaml_config
+--- extra_yaml_config
+plugins:
+ - http-logger
+ - opentelemetry
plugin_attr:
opentelemetry:
set_ngx_var: false
diff --git a/t/plugin/prometheus4.t b/t/plugin/prometheus4.t
index 89190448e731..758f2aae984f 100644
--- a/t/plugin/prometheus4.t
+++ b/t/plugin/prometheus4.t
@@ -333,15 +333,7 @@ passed
=== TEST 11: remove prometheus -> reload -> send batch request -> add prometheus for next tests
---- config
- location /t {
- content_by_lua_block {
- local http = require "resty.http"
- local httpc = http.new()
-
- local t = require("lib.test_admin").test
- ngx.sleep(0.1)
- local data = [[
+--- yaml_config
deployment:
role: traditional
role_traditional:
@@ -351,36 +343,19 @@ deployment:
apisix:
node_listen: 1984
plugins:
- - example-plugin
+ - example-plugin
plugin_attr:
- example-plugin:
- val: 1
- ]]
- require("lib.test_admin").set_config_yaml(data)
+ example-plugin:
+ val: 1
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
local code, _, org_body = t('/v1/plugins/reload', ngx.HTTP_PUT)
- local code, body = t('/batch-process-metrics',
- ngx.HTTP_GET
- )
+ local code, body = t('/batch-process-metrics', ngx.HTTP_GET)
ngx.status = code
ngx.say(body)
-
- local data = [[
-deployment:
- role: traditional
- role_traditional:
- config_provider: etcd
- admin:
- admin_key: null
-apisix:
- node_listen: 1984
-plugins:
- - prometheus
-plugin_attr:
- example-plugin:
- val: 1
- ]]
- require("lib.test_admin").set_config_yaml(data)
}
}
--- request
@@ -392,6 +367,18 @@ qr/404 Not Found/
=== TEST 12: fetch prometheus metrics -> batch_process_entries metrics should not be present
+--- yaml_config
+deployment:
+ role: traditional
+ role_traditional:
+ config_provider: etcd
+ admin:
+ admin_key: null
+apisix:
+ node_listen: 1984
+plugins:
+ - prometheus
+ - public-api
--- request
GET /apisix/prometheus/metrics
--- error_code: 200
diff --git a/t/plugin/zipkin3.t b/t/plugin/zipkin3.t
index f3aef6b5d8fe..2d743fff0db4 100644
--- a/t/plugin/zipkin3.t
+++ b/t/plugin/zipkin3.t
@@ -119,7 +119,9 @@ qr/ngx_var.zipkin_context_traceparent:00-\w{32}-\w{16}-01*/
=== TEST 3: trigger zipkin with disable set variables
---- yaml_config
+--- extra_yaml_config
+plugins:
+ - zipkin
plugin_attr:
zipkin:
set_ngx_var: false
diff --git a/t/router/radixtree-host-uri2.t b/t/router/radixtree-host-uri2.t
index 2a6aa42f0a9e..40936f7db9bc 100644
--- a/t/router/radixtree-host-uri2.t
+++ b/t/router/radixtree-host-uri2.t
@@ -80,7 +80,7 @@ use config_provider: yaml
routes:
-
uri: /server_port
- host: *.test.com
+ host: "*.test.com"
upstream:
nodes:
"127.0.0.1:1981": 1
@@ -109,7 +109,7 @@ use config_provider: yaml
routes:
-
uri: /*
- host: *.test.com
+ host: "*.test.com"
upstream:
nodes:
"127.0.0.1:1981": 1
@@ -138,7 +138,7 @@ use config_provider: yaml
routes:
-
uri: /*
- host: *.test.com
+ host: "*.test.com"
filter_func: "function(vars) return vars.arg_name == 'json' end"
upstream:
nodes:
@@ -168,7 +168,7 @@ use config_provider: yaml
routes:
-
uri: /*
- host: *.test.com
+ host: "*.test.com"
filter_func: "function(vars) return vars.arg_name == 'json' end"
upstream:
nodes:
diff --git a/t/stream-node/priority-balancer.t b/t/stream-node/priority-balancer.t
index 30172e2e3c15..3d0b8a80d61c 100644
--- a/t/stream-node/priority-balancer.t
+++ b/t/stream-node/priority-balancer.t
@@ -54,8 +54,7 @@ __DATA__
=== TEST 1: sanity
--- apisix_yaml
stream_routes:
- -
- id: 1
+ - id: 1
upstream:
type: least_conn
nodes:
@@ -100,8 +99,7 @@ proxy request to 127.0.0.1:1995
=== TEST 2: default priority is 0
--- apisix_yaml
stream_routes:
- -
- id: 1
+ - id: 1
upstream:
type: least_conn
nodes:
@@ -144,8 +142,7 @@ proxy request to 127.0.0.1:1995
=== TEST 3: fix priority for nonarray nodes
--- apisix_yaml
stream_routes:
- -
- id: 1
+ - id: 1
upstream:
type: roundrobin
nodes:
diff --git a/utils/install-dependencies.sh b/utils/install-dependencies.sh
index 0a50fa9c33e5..305421e8363e 100755
--- a/utils/install-dependencies.sh
+++ b/utils/install-dependencies.sh
@@ -78,7 +78,7 @@ function install_dependencies_with_apt() {
sudo apt-get update
# install some compilation tools
- sudo apt-get install -y curl make gcc g++ cpanminus libpcre3 libpcre3-dev libldap2-dev unzip openresty-zlib-dev openresty-pcre-dev
+ sudo apt-get install -y curl make gcc g++ cpanminus libpcre3 libpcre3-dev libldap2-dev libyaml-dev unzip openresty-zlib-dev openresty-pcre-dev
}
# Identify the different distributions and call the corresponding function